content/media/MediaDecoderStateMachine.cpp

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Fri, 16 Jan 2015 04:50:19 +0100
branch       TOR_BUG_9701
changeset    13:44a2da4a2ab2
permissions  -rw-r--r--

Replace accessor implementation with direct member state manipulation, as
requested in https://trac.torproject.org/projects/tor/ticket/9701#comment:32
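For orientation only, a minimal sketch of the kind of change the message describes:
assigning a state member directly instead of routing the transition through a
setter. The accessor name below is hypothetical and is not taken from this
changeset; the direct-assignment pattern matches what the listing itself does
(see Shutdown() and StartDecoding() further down):

    // Before (hypothetical accessor-based transition):
    //   SetState(DECODER_STATE_SHUTDOWN);

    // After: write the member directly while holding the decoder monitor,
    // then wake any waiters, as the functions below do.
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mState = DECODER_STATE_SHUTDOWN;
    mDecoder->GetReentrantMonitor().NotifyAll();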

     1 /* vim:set ts=2 sw=2 sts=2 et cindent: */
     2 /* This Source Code Form is subject to the terms of the Mozilla Public
     3  * License, v. 2.0. If a copy of the MPL was not distributed with this
     4  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     6 #ifdef XP_WIN
     7 // Include Windows headers required for enabling high precision timers.
     8 #include "windows.h"
     9 #include "mmsystem.h"
    10 #endif
    12 #include "mozilla/DebugOnly.h"
    13 #include <stdint.h>
    15 #include "MediaDecoderStateMachine.h"
    16 #include "AudioStream.h"
    17 #include "nsTArray.h"
    18 #include "MediaDecoder.h"
    19 #include "MediaDecoderReader.h"
    20 #include "mozilla/mozalloc.h"
    21 #include "VideoUtils.h"
    22 #include "mozilla/dom/TimeRanges.h"
    23 #include "nsDeque.h"
    24 #include "AudioSegment.h"
    25 #include "VideoSegment.h"
    26 #include "ImageContainer.h"
    27 #include "nsComponentManagerUtils.h"
    28 #include "nsITimer.h"
    29 #include "nsContentUtils.h"
    30 #include "MediaShutdownManager.h"
    31 #include "SharedThreadPool.h"
    32 #include "MediaTaskQueue.h"
    33 #include "nsIEventTarget.h"
    34 #include "prenv.h"
    35 #include "mozilla/Preferences.h"
    36 #include "gfx2DGlue.h"
    38 #include <algorithm>
    40 namespace mozilla {
    42 using namespace mozilla::layers;
    43 using namespace mozilla::dom;
    44 using namespace mozilla::gfx;
    46 // avoid redefined macro in unified build
    47 #undef DECODER_LOG
    48 #undef VERBOSE_LOG
    50 #ifdef PR_LOGGING
    51 extern PRLogModuleInfo* gMediaDecoderLog;
    52 #define DECODER_LOG(type, msg, ...) \
    53   PR_LOG(gMediaDecoderLog, type, ("Decoder=%p " msg, mDecoder.get(), ##__VA_ARGS__))
    54 #define VERBOSE_LOG(msg, ...)                          \
    55     PR_BEGIN_MACRO                                     \
    56       if (!PR_GetEnv("MOZ_QUIET")) {                   \
    57         DECODER_LOG(PR_LOG_DEBUG, msg, ##__VA_ARGS__); \
    58       }                                                \
    59     PR_END_MACRO
    60 #else
    61 #define DECODER_LOG(type, msg, ...)
    62 #define VERBOSE_LOG(msg, ...)
    63 #endif
    65 // GetCurrentTime is defined in winbase.h as a zero-argument macro forwarding to
    66 // GetTickCount() and conflicts with MediaDecoderStateMachine::GetCurrentTime
    67 // implementation.  With unified builds, putting this in headers is not enough.
    68 #ifdef GetCurrentTime
    69 #undef GetCurrentTime
    70 #endif
    72 // Wait this number of seconds when buffering, then leave and play
    73 // as best as we can if the required amount of data hasn't been
    74 // retrieved.
    75 static const uint32_t BUFFERING_WAIT_S = 30;
    77 // If audio queue has less than this many usecs of decoded audio, we won't risk
    78 // trying to decode the video, we'll skip decoding video up to the next
    79 // keyframe. We may increase this value for an individual decoder if we
    80 // encounter video frames which take a long time to decode.
    81 static const uint32_t LOW_AUDIO_USECS = 300000;
    83 // If more than this many usecs of decoded audio is queued, we'll hold off
    84 // decoding more audio. If we increase the low audio threshold (see
    85 // LOW_AUDIO_USECS above) we'll also increase this value to ensure it's not
    86 // less than the low audio threshold.
    87 const int64_t AMPLE_AUDIO_USECS = 1000000;
    89 // When we're only playing audio and we don't have a video stream, we divide
    90 // AMPLE_AUDIO_USECS and LOW_AUDIO_USECS by the following value. This reduces
    91 // the amount of decoded audio we buffer, reducing our memory usage. We only
    92 // need to decode far ahead when we're decoding video using software decoding,
    93 // as otherwise a long video decode could cause an audio underrun.
    94 const int64_t NO_VIDEO_AMPLE_AUDIO_DIVISOR = 8;
    96 // Maximum number of bytes we'll allocate and write at once to the audio
    97 // hardware when the audio stream contains missing frames and we're
    98 // writing silence in order to fill the gap. We limit our silence-writes
    99 // to 32KB in order to avoid allocating an impossibly large chunk of
   100 // memory if we encounter a large chunk of silence.
   101 const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024;
   103 // If we have fewer than LOW_VIDEO_FRAMES decoded frames, and
   104 // we're not "prerolling video", we'll skip the video up to the next keyframe
   105 // which is at or after the current playback position.
   106 static const uint32_t LOW_VIDEO_FRAMES = 1;
   108 // Arbitrary "frame duration" when playing only audio.
   109 static const int AUDIO_DURATION_USECS = 40000;
   111 // If we increase our "low audio threshold" (see LOW_AUDIO_USECS above), we
   112 // use this as a factor in all our calculations. Increasing this will cause
   113 // us to be more likely to increase our low audio threshold, and to
   114 // increase it by more.
   115 static const int THRESHOLD_FACTOR = 2;
   117 // If we have less than this much undecoded data available, we'll consider
   118 // ourselves to be running low on undecoded data. We determine how much
   119 // undecoded data we have remaining using the reader's GetBuffered()
   120 // implementation.
   121 static const int64_t LOW_DATA_THRESHOLD_USECS = 5000000;
   123 // LOW_DATA_THRESHOLD_USECS needs to be greater than AMPLE_AUDIO_USECS, otherwise
   124 // the skip-to-keyframe logic can activate when we're running low on data.
   125 static_assert(LOW_DATA_THRESHOLD_USECS > AMPLE_AUDIO_USECS,
   126               "LOW_DATA_THRESHOLD_USECS is too small");
   128 // Amount of excess usecs of data to add in to the "should we buffer" calculation.
   129 static const uint32_t EXHAUSTED_DATA_MARGIN_USECS = 60000;
   131 // If we enter buffering within QUICK_BUFFER_THRESHOLD_USECS seconds of starting
   132 // decoding, we'll enter "quick buffering" mode, which exits a lot sooner than
   133 // normal buffering mode. This exists so that if the decode-ahead exhausts the
   134 // downloaded data while decode/playback is just starting up (for example
   135 // after a seek while the media is still playing, or when playing a media
   136 // as soon as its load has started), we won't necessarily stop for 30s and wait
   137 // for buffering. We may actually be able to play back in this case, so exit
   138 // buffering early and try to play. If it turns out we can't play, we'll fall
   139 // back to buffering normally.
   140 static const uint32_t QUICK_BUFFER_THRESHOLD_USECS = 2000000;
   142 // If we're quick buffering, we'll remain in buffering mode while we have less than
   143 // QUICK_BUFFERING_LOW_DATA_USECS of decoded data available.
   144 static const uint32_t QUICK_BUFFERING_LOW_DATA_USECS = 1000000;
   146 // If QUICK_BUFFERING_LOW_DATA_USECS is > AMPLE_AUDIO_USECS, we won't exit
   147 // quick buffering in a timely fashion, as the decode pauses when it
   148 // reaches AMPLE_AUDIO_USECS decoded data, and thus we'll never reach
   149 // QUICK_BUFFERING_LOW_DATA_USECS.
   150 static_assert(QUICK_BUFFERING_LOW_DATA_USECS <= AMPLE_AUDIO_USECS,
   151               "QUICK_BUFFERING_LOW_DATA_USECS is too large");
   153 // This value has been chosen empirically.
   154 static const uint32_t AUDIOSTREAM_MIN_WRITE_BEFORE_START_USECS = 200000;
   156 // The amount of instability we tolerate in calls to
   157 // MediaDecoderStateMachine::UpdateEstimatedDuration(); changes of duration
   158 // less than this are ignored, as they're assumed to be the result of
   159 // instability in the duration estimation.
   160 static const int64_t ESTIMATED_DURATION_FUZZ_FACTOR_USECS = USECS_PER_S / 2;
   162 static TimeDuration UsecsToDuration(int64_t aUsecs) {
   163   return TimeDuration::FromMilliseconds(static_cast<double>(aUsecs) / USECS_PER_MS);
   164 }
   166 static int64_t DurationToUsecs(TimeDuration aDuration) {
   167   return static_cast<int64_t>(aDuration.ToSeconds() * USECS_PER_S);
   168 }
   170 MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
   171                                                    MediaDecoderReader* aReader,
   172                                                    bool aRealTime) :
   173   mDecoder(aDecoder),
   174   mState(DECODER_STATE_DECODING_METADATA),
   175   mInRunningStateMachine(false),
   176   mSyncPointInMediaStream(-1),
   177   mSyncPointInDecodedStream(-1),
   178   mResetPlayStartTime(false),
   179   mPlayDuration(0),
   180   mStartTime(-1),
   181   mEndTime(-1),
   182   mFragmentEndTime(-1),
   183   mReader(aReader),
   184   mCurrentFrameTime(0),
   185   mAudioStartTime(-1),
   186   mAudioEndTime(-1),
   187   mVideoFrameEndTime(-1),
   188   mVolume(1.0),
   189   mPlaybackRate(1.0),
   190   mPreservesPitch(true),
   191   mBasePosition(0),
   192   mAmpleVideoFrames(2),
   193   mLowAudioThresholdUsecs(LOW_AUDIO_USECS),
   194   mAmpleAudioThresholdUsecs(AMPLE_AUDIO_USECS),
   195   mDispatchedAudioDecodeTask(false),
   196   mDispatchedVideoDecodeTask(false),
   197   mIsReaderIdle(false),
   198   mAudioCaptured(false),
   199   mTransportSeekable(true),
   200   mMediaSeekable(true),
   201   mPositionChangeQueued(false),
   202   mAudioCompleted(false),
   203   mGotDurationFromMetaData(false),
   204   mDispatchedEventToDecode(false),
   205   mStopAudioThread(true),
   206   mQuickBuffering(false),
   207   mMinimizePreroll(false),
   208   mDecodeThreadWaiting(false),
   209   mRealTime(aRealTime),
   210   mLastFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED),
   211   mTimerId(0)
   212 {
   213   MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   214   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
   216   // Only enable realtime mode when "media.realtime_decoder.enabled" is true.
   217   if (Preferences::GetBool("media.realtime_decoder.enabled", false) == false)
   218     mRealTime = false;
   220   mAmpleVideoFrames =
   221     std::max<uint32_t>(Preferences::GetUint("media.video-queue.default-size", 10), 3);
   223   mBufferingWait = mRealTime ? 0 : BUFFERING_WAIT_S;
   224   mLowDataThresholdUsecs = mRealTime ? 0 : LOW_DATA_THRESHOLD_USECS;
   226   mVideoPrerollFrames = mRealTime ? 0 : mAmpleVideoFrames / 2;
   227   mAudioPrerollUsecs = mRealTime ? 0 : LOW_AUDIO_USECS * 2;
   229 #ifdef XP_WIN
   230   // Ensure high precision timers are enabled on Windows, otherwise the state
   231   // machine thread isn't woken up at reliable intervals to set the next frame,
   232   // and we drop frames while painting. Note that multiple calls to this
   233   // function per-process are OK, provided each call is matched by a corresponding
   234   // timeEndPeriod() call.
   235   timeBeginPeriod(1);
   236 #endif
   237 }
   239 MediaDecoderStateMachine::~MediaDecoderStateMachine()
   240 {
   241   MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread.");
   242   MOZ_COUNT_DTOR(MediaDecoderStateMachine);
   243   NS_ASSERTION(!mPendingWakeDecoder.get(),
   244                "WakeDecoder should have been revoked already");
   246   MOZ_ASSERT(!mDecodeTaskQueue, "Should be released in SHUTDOWN");
   247   // No need to cancel the timer here for we've done that in SHUTDOWN.
   248   MOZ_ASSERT(!mTimer, "Should be released in SHUTDOWN");
   249   mReader = nullptr;
   251 #ifdef XP_WIN
   252   timeEndPeriod(1);
   253 #endif
   254 }
   256 bool MediaDecoderStateMachine::HasFutureAudio() {
   257   AssertCurrentThreadInMonitor();
   258   NS_ASSERTION(HasAudio(), "Should only call HasFutureAudio() when we have audio");
   259   // We've got audio ready to play if:
   260   // 1. We've not completed playback of audio, and
   261   // 2. we either have more than the threshold of decoded audio available, or
   262   //    we've completely decoded all audio (but not finished playing it yet
   263   //    as per 1).
   264   return !mAudioCompleted &&
   265          (AudioDecodedUsecs() > LOW_AUDIO_USECS * mPlaybackRate || AudioQueue().IsFinished());
   266 }
   268 bool MediaDecoderStateMachine::HaveNextFrameData() {
   269   AssertCurrentThreadInMonitor();
   270   return (!HasAudio() || HasFutureAudio()) &&
   271          (!HasVideo() || VideoQueue().GetSize() > 0);
   272 }
   274 int64_t MediaDecoderStateMachine::GetDecodedAudioDuration() {
   275   NS_ASSERTION(OnDecodeThread() || OnStateMachineThread(),
   276                "Should be on decode thread or state machine thread");
   277   AssertCurrentThreadInMonitor();
   278   int64_t audioDecoded = AudioQueue().Duration();
   279   if (mAudioEndTime != -1) {
   280     audioDecoded += mAudioEndTime - GetMediaTime();
   281   }
   282   return audioDecoded;
   283 }
   285 void MediaDecoderStateMachine::SendStreamAudio(AudioData* aAudio,
   286                                                DecodedStreamData* aStream,
   287                                                AudioSegment* aOutput)
   288 {
   289   NS_ASSERTION(OnDecodeThread() ||
   290                OnStateMachineThread(), "Should be on decode thread or state machine thread");
   291   AssertCurrentThreadInMonitor();
   293   if (aAudio->mTime <= aStream->mLastAudioPacketTime) {
   294     // ignore packet that we've already processed
   295     return;
   296   }
   297   aStream->mLastAudioPacketTime = aAudio->mTime;
   298   aStream->mLastAudioPacketEndTime = aAudio->GetEndTime();
   300   // This logic has to mimic AudioLoop closely to make sure we write
   301   // the exact same silences
   302   CheckedInt64 audioWrittenOffset = UsecsToFrames(mInfo.mAudio.mRate,
   303       aStream->mInitialTime + mStartTime) + aStream->mAudioFramesWritten;
   304   CheckedInt64 frameOffset = UsecsToFrames(mInfo.mAudio.mRate, aAudio->mTime);
   305   if (!audioWrittenOffset.isValid() || !frameOffset.isValid())
   306     return;
   307   if (audioWrittenOffset.value() < frameOffset.value()) {
   308     // Write silence to catch up
   309     VERBOSE_LOG("writing %d frames of silence to MediaStream",
   310                 int32_t(frameOffset.value() - audioWrittenOffset.value()));
   311     AudioSegment silence;
   312     silence.InsertNullDataAtStart(frameOffset.value() - audioWrittenOffset.value());
   313     aStream->mAudioFramesWritten += silence.GetDuration();
   314     aOutput->AppendFrom(&silence);
   315   }
   317   int64_t offset;
   318   if (aStream->mAudioFramesWritten == 0) {
   319     NS_ASSERTION(frameOffset.value() <= audioWrittenOffset.value(),
   320                  "Otherwise we'd have taken the write-silence path");
   321     // We're starting in the middle of a packet. Split the packet.
   322     offset = audioWrittenOffset.value() - frameOffset.value();
   323   } else {
   324     // Write the entire packet.
   325     offset = 0;
   326   }
   328   if (offset >= aAudio->mFrames)
   329     return;
   331   aAudio->EnsureAudioBuffer();
   332   nsRefPtr<SharedBuffer> buffer = aAudio->mAudioBuffer;
   333   AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
   334   nsAutoTArray<const AudioDataValue*,2> channels;
   335   for (uint32_t i = 0; i < aAudio->mChannels; ++i) {
   336     channels.AppendElement(bufferData + i*aAudio->mFrames + offset);
   337   }
   338   aOutput->AppendFrames(buffer.forget(), channels, aAudio->mFrames);
   339   VERBOSE_LOG("writing %d frames of data to MediaStream for AudioData at %lld",
   340               aAudio->mFrames - int32_t(offset), aAudio->mTime);
   341   aStream->mAudioFramesWritten += aAudio->mFrames - int32_t(offset);
   342 }
   344 static void WriteVideoToMediaStream(layers::Image* aImage,
   345                                     int64_t aDuration,
   346                                     const IntSize& aIntrinsicSize,
   347                                     VideoSegment* aOutput)
   348 {
   349   nsRefPtr<layers::Image> image = aImage;
   350   aOutput->AppendFrame(image.forget(), aDuration, aIntrinsicSize);
   351 }
   353 static const TrackID TRACK_AUDIO = 1;
   354 static const TrackID TRACK_VIDEO = 2;
   355 static const TrackRate RATE_VIDEO = USECS_PER_S;
   357 void MediaDecoderStateMachine::SendStreamData()
   358 {
   359   NS_ASSERTION(OnDecodeThread() ||
   360                OnStateMachineThread(), "Should be on decode thread or state machine thread");
   361   AssertCurrentThreadInMonitor();
   363   DecodedStreamData* stream = mDecoder->GetDecodedStream();
   364   if (!stream)
   365     return;
   367   if (mState == DECODER_STATE_DECODING_METADATA)
   368     return;
   370   // If there's still an audio thread alive, then we can't send any stream
   371   // data yet since both SendStreamData and the audio thread want to be in
   372   // charge of popping the audio queue. We're waiting for the audio thread
   373   // to die before sending anything to our stream.
   374   if (mAudioThread)
   375     return;
   377   int64_t minLastAudioPacketTime = INT64_MAX;
   378   bool finished =
   379       (!mInfo.HasAudio() || AudioQueue().IsFinished()) &&
   380       (!mInfo.HasVideo() || VideoQueue().IsFinished());
   381   if (mDecoder->IsSameOriginMedia()) {
   382     SourceMediaStream* mediaStream = stream->mStream;
   383     StreamTime endPosition = 0;
   385     if (!stream->mStreamInitialized) {
   386       if (mInfo.HasAudio()) {
   387         AudioSegment* audio = new AudioSegment();
   388         mediaStream->AddTrack(TRACK_AUDIO, mInfo.mAudio.mRate, 0, audio);
   389         stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_AUDIO,
   390             GetStateMachineThread(), GetWakeDecoderRunnable());
   391       }
   392       if (mInfo.HasVideo()) {
   393         VideoSegment* video = new VideoSegment();
   394         mediaStream->AddTrack(TRACK_VIDEO, RATE_VIDEO, 0, video);
   395         stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_VIDEO,
   396             GetStateMachineThread(), GetWakeDecoderRunnable());
   397       }
   398       stream->mStreamInitialized = true;
   399     }
   401     if (mInfo.HasAudio()) {
   402       nsAutoTArray<AudioData*,10> audio;
   403       // It's OK to hold references to the AudioData because while audio
   404       // is captured, only the decoder thread pops from the queue (see below).
   405       AudioQueue().GetElementsAfter(stream->mLastAudioPacketTime, &audio);
   406       AudioSegment output;
   407       for (uint32_t i = 0; i < audio.Length(); ++i) {
   408         SendStreamAudio(audio[i], stream, &output);
   409       }
   410       if (output.GetDuration() > 0) {
   411         mediaStream->AppendToTrack(TRACK_AUDIO, &output);
   412       }
   413       if (AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) {
   414         mediaStream->EndTrack(TRACK_AUDIO);
   415         stream->mHaveSentFinishAudio = true;
   416       }
   417       minLastAudioPacketTime = std::min(minLastAudioPacketTime, stream->mLastAudioPacketTime);
   418       endPosition = std::max(endPosition,
   419           TicksToTimeRoundDown(mInfo.mAudio.mRate, stream->mAudioFramesWritten));
   420     }
   422     if (mInfo.HasVideo()) {
   423       nsAutoTArray<VideoData*,10> video;
   424       // It's OK to hold references to the VideoData because only the
   425       // decoder thread pops from the queue.
   426       VideoQueue().GetElementsAfter(stream->mNextVideoTime, &video);
   427       VideoSegment output;
   428       for (uint32_t i = 0; i < video.Length(); ++i) {
   429         VideoData* v = video[i];
   430         if (stream->mNextVideoTime < v->mTime) {
   431           VERBOSE_LOG("writing last video to MediaStream %p for %lldus",
   432                       mediaStream, v->mTime - stream->mNextVideoTime);
   433           // Write last video frame to catch up. mLastVideoImage can be null here
   434           // which is fine, it just means there's no video.
   435           WriteVideoToMediaStream(stream->mLastVideoImage,
   436             v->mTime - stream->mNextVideoTime, stream->mLastVideoImageDisplaySize,
   437               &output);
   438           stream->mNextVideoTime = v->mTime;
   439         }
   440         if (stream->mNextVideoTime < v->GetEndTime()) {
   441           VERBOSE_LOG("writing video frame %lldus to MediaStream %p for %lldus",
   442                       v->mTime, mediaStream, v->GetEndTime() - stream->mNextVideoTime);
   443           WriteVideoToMediaStream(v->mImage,
   444               v->GetEndTime() - stream->mNextVideoTime, v->mDisplay,
   445               &output);
   446           stream->mNextVideoTime = v->GetEndTime();
   447           stream->mLastVideoImage = v->mImage;
   448           stream->mLastVideoImageDisplaySize = v->mDisplay;
   449         } else {
   450           VERBOSE_LOG("skipping writing video frame %lldus (end %lldus) to MediaStream",
   451                       v->mTime, v->GetEndTime());
   452         }
   453       }
   454       if (output.GetDuration() > 0) {
   455         mediaStream->AppendToTrack(TRACK_VIDEO, &output);
   456       }
   457       if (VideoQueue().IsFinished() && !stream->mHaveSentFinishVideo) {
   458         mediaStream->EndTrack(TRACK_VIDEO);
   459         stream->mHaveSentFinishVideo = true;
   460       }
   461       endPosition = std::max(endPosition,
   462           TicksToTimeRoundDown(RATE_VIDEO, stream->mNextVideoTime - stream->mInitialTime));
   463     }
   465     if (!stream->mHaveSentFinish) {
   466       stream->mStream->AdvanceKnownTracksTime(endPosition);
   467     }
   469     if (finished && !stream->mHaveSentFinish) {
   470       stream->mHaveSentFinish = true;
   471       stream->mStream->Finish();
   472     }
   473   }
   475   if (mAudioCaptured) {
   476     // Discard audio packets that are no longer needed.
   477     while (true) {
   478       const AudioData* a = AudioQueue().PeekFront();
   479       // Packet times are not 100% reliable, so this may discard packets that
   480       // actually contain data for mCurrentFrameTime. This means that if someone
   481       // creates a new output stream, we may not have the audio for the very
   482       // start. That's OK; we'll play silence instead for a brief moment.
   483       // Seeking to this time would have a similar issue for such badly
   484       // muxed resources.
   485       if (!a || a->GetEndTime() >= minLastAudioPacketTime)
   486         break;
   487       mAudioEndTime = std::max(mAudioEndTime, a->GetEndTime());
   488       delete AudioQueue().PopFront();
   489     }
   491     if (finished) {
   492       mAudioCompleted = true;
   493       UpdateReadyState();
   494     }
   495   }
   496 }
   498 MediaDecoderStateMachine::WakeDecoderRunnable*
   499 MediaDecoderStateMachine::GetWakeDecoderRunnable()
   500 {
   501   AssertCurrentThreadInMonitor();
   503   if (!mPendingWakeDecoder.get()) {
   504     mPendingWakeDecoder = new WakeDecoderRunnable(this);
   505   }
   506   return mPendingWakeDecoder.get();
   507 }
   509 bool MediaDecoderStateMachine::HaveEnoughDecodedAudio(int64_t aAmpleAudioUSecs)
   510 {
   511   AssertCurrentThreadInMonitor();
   513   if (AudioQueue().GetSize() == 0 ||
   514       GetDecodedAudioDuration() < aAmpleAudioUSecs) {
   515     return false;
   516   }
   517   if (!mAudioCaptured) {
   518     return true;
   519   }
   521   DecodedStreamData* stream = mDecoder->GetDecodedStream();
   522   if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishAudio) {
   523     if (!stream->mStream->HaveEnoughBuffered(TRACK_AUDIO)) {
   524       return false;
   525     }
   526     stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_AUDIO,
   527         GetStateMachineThread(), GetWakeDecoderRunnable());
   528   }
   530   return true;
   531 }
   533 bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
   534 {
   535   AssertCurrentThreadInMonitor();
   537   if (static_cast<uint32_t>(VideoQueue().GetSize()) < mAmpleVideoFrames * mPlaybackRate) {
   538     return false;
   539   }
   541   DecodedStreamData* stream = mDecoder->GetDecodedStream();
   542   if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishVideo) {
   543     if (!stream->mStream->HaveEnoughBuffered(TRACK_VIDEO)) {
   544       return false;
   545     }
   546     stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_VIDEO,
   547         GetStateMachineThread(), GetWakeDecoderRunnable());
   548   }
   550   return true;
   551 }
   553 bool
   554 MediaDecoderStateMachine::NeedToDecodeVideo()
   555 {
   556   AssertCurrentThreadInMonitor();
   557   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
   558                "Should be on state machine or decode thread.");
   559   return mIsVideoDecoding &&
   560          !mMinimizePreroll &&
   561          !HaveEnoughDecodedVideo();
   562 }
   564 void
   565 MediaDecoderStateMachine::DecodeVideo()
   566 {
   567   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   568   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
   570   if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) {
   571     mDispatchedVideoDecodeTask = false;
   572     return;
   573   }
   574   EnsureActive();
   576   // We don't want to consider skipping to the next keyframe if we've
   577   // only just started up the decode loop, so wait until we've decoded
   578   // some frames before enabling the keyframe skip logic on video.
   579   if (mIsVideoPrerolling &&
   580       (static_cast<uint32_t>(VideoQueue().GetSize())
   581         >= mVideoPrerollFrames * mPlaybackRate))
   582   {
   583     mIsVideoPrerolling = false;
   584   }
   586   // We'll skip the video decode to the nearest keyframe if we're low on
   587   // audio, or if we're low on video, provided we're not running low on
   588   // data to decode. If we're running low on downloaded data to decode,
   589   // we won't start keyframe skipping, as we'll be pausing playback to buffer
   590   // soon anyway and we'll want to be able to display frames immediately
   591   // after buffering finishes.
   592   if (mState == DECODER_STATE_DECODING &&
   593       !mSkipToNextKeyFrame &&
   594       mIsVideoDecoding &&
   595       ((!mIsAudioPrerolling && mIsAudioDecoding &&
   596         GetDecodedAudioDuration() < mLowAudioThresholdUsecs * mPlaybackRate) ||
   597         (!mIsVideoPrerolling && mIsVideoDecoding &&
   598          // don't skip frames when |clock time| <= |mVideoFrameEndTime|, since
   599          // we are still in the safe range and not underrunning video frames
   600          GetClock() > mVideoFrameEndTime &&
   601         (static_cast<uint32_t>(VideoQueue().GetSize())
   602           < LOW_VIDEO_FRAMES * mPlaybackRate))) &&
   603       !HasLowUndecodedData())
   604   {
   605     mSkipToNextKeyFrame = true;
   606     DECODER_LOG(PR_LOG_DEBUG, "Skipping video decode to the next keyframe");
   607   }
   609   // Time the video decode, so that if it's slow, we can increase our low
   610   // audio threshold to reduce the chance of an audio underrun while we're
   611   // waiting for a video decode to complete.
   612   TimeDuration decodeTime;
   613   {
   614     int64_t currentTime = GetMediaTime();
   615     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
   616     TimeStamp start = TimeStamp::Now();
   617     mIsVideoDecoding = mReader->DecodeVideoFrame(mSkipToNextKeyFrame, currentTime);
   618     decodeTime = TimeStamp::Now() - start;
   619   }
   620   if (!mIsVideoDecoding) {
   621     // Playback ended for this stream, close the sample queue.
   622     VideoQueue().Finish();
   623     CheckIfDecodeComplete();
   624   }
   626   if (THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > mLowAudioThresholdUsecs &&
   627       !HasLowUndecodedData())
   628   {
   629     mLowAudioThresholdUsecs =
   630       std::min(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), AMPLE_AUDIO_USECS);
   631     mAmpleAudioThresholdUsecs = std::max(THRESHOLD_FACTOR * mLowAudioThresholdUsecs,
   632                                           mAmpleAudioThresholdUsecs);
   633     DECODER_LOG(PR_LOG_DEBUG, "Slow video decode, set mLowAudioThresholdUsecs=%lld mAmpleAudioThresholdUsecs=%lld",
   634                 mLowAudioThresholdUsecs, mAmpleAudioThresholdUsecs);
   635   }
   637   SendStreamData();
   639   // The ready state can change when we've decoded data, so update the
   640   // ready state, so that DOM events can fire.
   641   UpdateReadyState();
   643   mDispatchedVideoDecodeTask = false;
   644   DispatchDecodeTasksIfNeeded();
   645 }
   647 bool
   648 MediaDecoderStateMachine::NeedToDecodeAudio()
   649 {
   650   AssertCurrentThreadInMonitor();
   651   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
   652                "Should be on state machine or decode thread.");
   653   return mIsAudioDecoding &&
   654          !mMinimizePreroll &&
   655          !HaveEnoughDecodedAudio(mAmpleAudioThresholdUsecs * mPlaybackRate);
   656 }
   658 void
   659 MediaDecoderStateMachine::DecodeAudio()
   660 {
   661   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   662   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
   664   if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) {
   665     mDispatchedAudioDecodeTask = false;
   666     return;
   667   }
   668   EnsureActive();
   670   // We don't want to consider skipping to the next keyframe if we've
   671   // only just started up the decode loop, so wait until we've decoded
   672   // some audio data before enabling the keyframe skip logic on audio.
   673   if (mIsAudioPrerolling &&
   674       GetDecodedAudioDuration() >= mAudioPrerollUsecs * mPlaybackRate) {
   675     mIsAudioPrerolling = false;
   676   }
   678   {
   679     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
   680     mIsAudioDecoding = mReader->DecodeAudioData();
   681   }
   682   if (!mIsAudioDecoding) {
   683     // Playback ended for this stream, close the sample queue.
   684     AudioQueue().Finish();
   685     CheckIfDecodeComplete();
   686   }
   688   SendStreamData();
   690   // Notify to ensure that the AudioLoop() is not waiting, in case it was
   691   // waiting for more audio to be decoded.
   692   mDecoder->GetReentrantMonitor().NotifyAll();
   694   // The ready state can change when we've decoded data, so update the
   695   // ready state, so that DOM events can fire.
   696   UpdateReadyState();
   698   mDispatchedAudioDecodeTask = false;
   699   DispatchDecodeTasksIfNeeded();
   700 }
   702 void
   703 MediaDecoderStateMachine::CheckIfDecodeComplete()
   704 {
   705   AssertCurrentThreadInMonitor();
   706   if (mState == DECODER_STATE_SHUTDOWN ||
   707       mState == DECODER_STATE_SEEKING ||
   708       mState == DECODER_STATE_COMPLETED) {
   709     // Don't change our state if we've already been shut down, or we're seeking,
   710     // since we don't want to abort the shutdown or seek processes.
   711     return;
   712   }
   713   MOZ_ASSERT(!AudioQueue().IsFinished() || !mIsAudioDecoding);
   714   MOZ_ASSERT(!VideoQueue().IsFinished() || !mIsVideoDecoding);
   715   if (!mIsVideoDecoding && !mIsAudioDecoding) {
   716     // We've finished decoding all active streams,
   717     // so move to COMPLETED state.
   718     mState = DECODER_STATE_COMPLETED;
   719     DispatchDecodeTasksIfNeeded();
   720     ScheduleStateMachine();
   721   }
   722   DECODER_LOG(PR_LOG_DEBUG, "CheckIfDecodeComplete %scompleted",
   723               ((mState == DECODER_STATE_COMPLETED) ? "" : "NOT "));
   724 }
   726 bool MediaDecoderStateMachine::IsPlaying()
   727 {
   728   AssertCurrentThreadInMonitor();
   730   return !mPlayStartTime.IsNull();
   731 }
   731 }
   733 // If we have already written enough frames to the AudioStream, start the
   734 // playback.
   735 static void
   736 StartAudioStreamPlaybackIfNeeded(AudioStream* aStream)
   737 {
   738   // We want to have enough data in the buffer to start the stream.
   739   if (static_cast<double>(aStream->GetWritten()) / aStream->GetRate() >=
   740       static_cast<double>(AUDIOSTREAM_MIN_WRITE_BEFORE_START_USECS) / USECS_PER_S) {
   741     aStream->Start();
   742   }
   743 }
   745 static void WriteSilence(AudioStream* aStream, uint32_t aFrames)
   746 {
   747   uint32_t numSamples = aFrames * aStream->GetChannels();
   748   nsAutoTArray<AudioDataValue, 1000> buf;
   749   buf.SetLength(numSamples);
   750   memset(buf.Elements(), 0, numSamples * sizeof(AudioDataValue));
   751   aStream->Write(buf.Elements(), aFrames);
   753   StartAudioStreamPlaybackIfNeeded(aStream);
   754 }
   756 void MediaDecoderStateMachine::AudioLoop()
   757 {
   758   NS_ASSERTION(OnAudioThread(), "Should be on audio thread.");
   759   DECODER_LOG(PR_LOG_DEBUG, "Begun audio thread/loop");
   760   int64_t audioDuration = 0;
   761   int64_t audioStartTime = -1;
   762   uint32_t channels, rate;
   763   double volume = -1;
   764   bool setVolume;
   765   double playbackRate = -1;
   766   bool setPlaybackRate;
   767   bool preservesPitch;
   768   bool setPreservesPitch;
   769   AudioChannel audioChannel;
   771   {
   772     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   773     mAudioCompleted = false;
   774     audioStartTime = mAudioStartTime;
   775     NS_ASSERTION(audioStartTime != -1, "Should have audio start time by now");
   776     channels = mInfo.mAudio.mChannels;
   777     rate = mInfo.mAudio.mRate;
   779     audioChannel = mDecoder->GetAudioChannel();
   780     volume = mVolume;
   781     preservesPitch = mPreservesPitch;
   782     playbackRate = mPlaybackRate;
   783   }
   785   {
   786     // AudioStream initialization can block for extended periods in unusual
   787     // circumstances, so we take care to drop the decoder monitor while
   788     // initializing.
   789     RefPtr<AudioStream> audioStream(new AudioStream());
   790     audioStream->Init(channels, rate, audioChannel, AudioStream::HighLatency);
   791     audioStream->SetVolume(volume);
   792     if (audioStream->SetPreservesPitch(preservesPitch) != NS_OK) {
   793       NS_WARNING("Setting the pitch preservation failed at AudioLoop start.");
   794     }
   795     if (playbackRate != 1.0) {
   796       NS_ASSERTION(playbackRate != 0,
   797                    "Don't set the playbackRate to 0 on an AudioStream.");
   798       if (audioStream->SetPlaybackRate(playbackRate) != NS_OK) {
   799         NS_WARNING("Setting the playback rate failed at AudioLoop start.");
   800       }
   801     }
   803     {
   804       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   805       mAudioStream = audioStream.forget();
   806     }
   807   }
   809   while (1) {
   810     // Wait while we're not playing, and we're not shutting down, or we're
   811     // playing and we've got no audio to play.
   812     {
   813       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   814       NS_ASSERTION(mState != DECODER_STATE_DECODING_METADATA,
   815                    "Should have meta data before audio started playing.");
   816       while (mState != DECODER_STATE_SHUTDOWN &&
   817              !mStopAudioThread &&
   818              (!IsPlaying() ||
   819               mState == DECODER_STATE_BUFFERING ||
   820               (AudioQueue().GetSize() == 0 &&
   821                !AudioQueue().AtEndOfStream())))
   822       {
   823         if (!IsPlaying() && !mAudioStream->IsPaused()) {
   824           mAudioStream->Pause();
   825         }
   826         mon.Wait();
   827       }
   829       // If we're shutting down, break out and exit the audio thread.
   830       // Also break out if audio is being captured.
   831       if (mState == DECODER_STATE_SHUTDOWN ||
   832           mStopAudioThread ||
   833           AudioQueue().AtEndOfStream())
   834       {
   835         break;
   836       }
   838       // We only want to go to the expense of changing the volume if
   839       // the volume has changed.
   840       setVolume = volume != mVolume;
   841       volume = mVolume;
   843       // Same for the playbackRate.
   844       setPlaybackRate = playbackRate != mPlaybackRate;
   845       playbackRate = mPlaybackRate;
   847       // Same for the pitch preservation.
   848       setPreservesPitch = preservesPitch != mPreservesPitch;
   849       preservesPitch = mPreservesPitch;
   851       if (IsPlaying() && mAudioStream->IsPaused()) {
   852         mAudioStream->Resume();
   853       }
   854     }
   856     if (setVolume) {
   857       mAudioStream->SetVolume(volume);
   858     }
   859     if (setPlaybackRate) {
   860       NS_ASSERTION(playbackRate != 0,
   861                    "Don't set the playbackRate to 0 in the AudioStreams");
   862       if (mAudioStream->SetPlaybackRate(playbackRate) != NS_OK) {
   863         NS_WARNING("Setting the playback rate failed in AudioLoop.");
   864       }
   865     }
   866     if (setPreservesPitch) {
   867       if (mAudioStream->SetPreservesPitch(preservesPitch) != NS_OK) {
   868         NS_WARNING("Setting the pitch preservation failed in AudioLoop.");
   869       }
   870     }
   871     NS_ASSERTION(AudioQueue().GetSize() > 0,
   872                  "Should have data to play");
   873     // See if there's a gap in the audio. If there is, push silence into the
   874     // audio hardware, so we can play across the gap.
   875     const AudioData* s = AudioQueue().PeekFront();
   877     // Calculate the number of frames that have been pushed onto the audio
   878     // hardware.
   879     CheckedInt64 playedFrames = UsecsToFrames(audioStartTime, rate) +
   880                                               audioDuration;
   881     // Calculate the timestamp of the next chunk of audio in numbers of
   882     // samples.
   883     CheckedInt64 sampleTime = UsecsToFrames(s->mTime, rate);
   884     CheckedInt64 missingFrames = sampleTime - playedFrames;
   885     if (!missingFrames.isValid() || !sampleTime.isValid()) {
   886       NS_WARNING("Int overflow adding in AudioLoop()");
   887       break;
   888     }
   890     int64_t framesWritten = 0;
   891     if (missingFrames.value() > 0) {
   892       // The next audio chunk begins some time after the end of the last chunk
   893       // we pushed to the audio hardware. We must push silence into the audio
   894       // hardware so that the next audio chunk begins playback at the correct
   895       // time.
   896       missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
   897       VERBOSE_LOG("playing %d frames of silence", int32_t(missingFrames.value()));
   898       framesWritten = PlaySilence(static_cast<uint32_t>(missingFrames.value()),
   899                                   channels, playedFrames.value());
   900     } else {
   901       framesWritten = PlayFromAudioQueue(sampleTime.value(), channels);
   902     }
   903     audioDuration += framesWritten;
   904     {
   905       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   906       CheckedInt64 playedUsecs = FramesToUsecs(audioDuration, rate) + audioStartTime;
   907       if (!playedUsecs.isValid()) {
   908         NS_WARNING("Int overflow calculating audio end time");
   909         break;
   910       }
   911       mAudioEndTime = playedUsecs.value();
   912     }
   913   }
   914   {
   915     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   916     if (AudioQueue().AtEndOfStream() &&
   917         mState != DECODER_STATE_SHUTDOWN &&
   918         !mStopAudioThread)
   919     {
   920       // If the media was too short to trigger the start of the audio stream,
   921       // start it now.
   922       mAudioStream->Start();
   923       // Last frame pushed to audio hardware, wait for the audio to finish,
   924       // before the audio thread terminates.
   925       bool seeking = false;
   926       {
   927         int64_t oldPosition = -1;
   928         int64_t position = GetMediaTime();
   929         while (oldPosition != position &&
   930                mAudioEndTime - position > 0 &&
   931                mState != DECODER_STATE_SEEKING &&
   932                mState != DECODER_STATE_SHUTDOWN)
   933         {
   934           const int64_t DRAIN_BLOCK_USECS = 100000;
   935           Wait(std::min(mAudioEndTime - position, DRAIN_BLOCK_USECS));
   936           oldPosition = position;
   937           position = GetMediaTime();
   938         }
   939         seeking = mState == DECODER_STATE_SEEKING;
   940       }
   942       if (!seeking && !mAudioStream->IsPaused()) {
   943         {
   944           ReentrantMonitorAutoExit exit(mDecoder->GetReentrantMonitor());
   945           mAudioStream->Drain();
   946         }
   947       }
   948     }
   949   }
   950   DECODER_LOG(PR_LOG_DEBUG, "Reached audio stream end.");
   951   {
   952     // Must hold lock while shutting down and nulling out the audio stream to
   953     // prevent the state machine thread from using it while we're destroying it.
   954     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   955     mAudioStream->Shutdown();
   956     mAudioStream = nullptr;
   957     if (!mAudioCaptured) {
   958       mAudioCompleted = true;
   959       UpdateReadyState();
   960       // Kick the decode thread; it may be sleeping waiting for this to finish.
   961       mDecoder->GetReentrantMonitor().NotifyAll();
   962     }
   963   }
   965   DECODER_LOG(PR_LOG_DEBUG, "Audio stream finished playing, audio thread exit");
   966 }
   968 uint32_t MediaDecoderStateMachine::PlaySilence(uint32_t aFrames,
   969                                                    uint32_t aChannels,
   970                                                    uint64_t aFrameOffset)
   972 {
   973   NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
   974   NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
   975   uint32_t maxFrames = SILENCE_BYTES_CHUNK / aChannels / sizeof(AudioDataValue);
   976   uint32_t frames = std::min(aFrames, maxFrames);
   977   WriteSilence(mAudioStream, frames);
   978   return frames;
   979 }
   981 uint32_t MediaDecoderStateMachine::PlayFromAudioQueue(uint64_t aFrameOffset,
   982                                                       uint32_t aChannels)
   983 {
   984   NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
   985   NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
   986   nsAutoPtr<AudioData> audio(AudioQueue().PopFront());
   987   {
   988     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   989     NS_WARN_IF_FALSE(IsPlaying(), "Should be playing");
   990     // Awaken the decode loop if it's waiting for space to free up in the
   991     // audio queue.
   992     mDecoder->GetReentrantMonitor().NotifyAll();
   993   }
   994   int64_t offset = -1;
   995   uint32_t frames = 0;
   996   VERBOSE_LOG("playing %d frames of data to stream for AudioData at %lld",
   997               audio->mFrames, audio->mTime);
   998   mAudioStream->Write(audio->mAudioData,
   999                       audio->mFrames);
  1001   aChannels = mAudioStream->GetOutChannels();
  1003   StartAudioStreamPlaybackIfNeeded(mAudioStream);
  1005   offset = audio->mOffset;
  1006   frames = audio->mFrames;
  1008   if (offset != -1) {
   1009     mDecoder->UpdatePlaybackOffset(offset);
   1010   }
   1011   return frames;
   1012 }
   1014 nsresult MediaDecoderStateMachine::Init(MediaDecoderStateMachine* aCloneDonor)
   1015 {
  1016   MOZ_ASSERT(NS_IsMainThread());
  1018   RefPtr<SharedThreadPool> decodePool(
  1019     SharedThreadPool::Get(NS_LITERAL_CSTRING("Media Decode"),
  1020                           Preferences::GetUint("media.num-decode-threads", 25)));
  1021   NS_ENSURE_TRUE(decodePool, NS_ERROR_FAILURE);
  1023   RefPtr<SharedThreadPool> stateMachinePool(
  1024     SharedThreadPool::Get(NS_LITERAL_CSTRING("Media State Machine"), 1));
  1025   NS_ENSURE_TRUE(stateMachinePool, NS_ERROR_FAILURE);
  1027   mDecodeTaskQueue = new MediaTaskQueue(decodePool.forget());
  1028   NS_ENSURE_TRUE(mDecodeTaskQueue, NS_ERROR_FAILURE);
  1030   MediaDecoderReader* cloneReader = nullptr;
  1031   if (aCloneDonor) {
   1032     cloneReader = aCloneDonor->mReader;
   1033   }
  1035   mStateMachineThreadPool = stateMachinePool;
  1037   nsresult rv;
  1038   mTimer = do_CreateInstance("@mozilla.org/timer;1", &rv);
  1039   NS_ENSURE_SUCCESS(rv, rv);
  1040   rv = mTimer->SetTarget(GetStateMachineThread());
  1041   NS_ENSURE_SUCCESS(rv, rv);
   1043   return mReader->Init(cloneReader);
   1044 }
   1046 void MediaDecoderStateMachine::StopPlayback()
   1047 {
  1048   DECODER_LOG(PR_LOG_DEBUG, "StopPlayback()");
  1050   AssertCurrentThreadInMonitor();
  1052   mDecoder->NotifyPlaybackStopped();
  1054   if (IsPlaying()) {
  1055     mPlayDuration = GetClock();
   1056     mPlayStartTime = TimeStamp();
   1057   }
  1058   // Notify the audio thread, so that it notices that we've stopped playing,
  1059   // so it can pause audio playback.
  1060   mDecoder->GetReentrantMonitor().NotifyAll();
  1061   NS_ASSERTION(!IsPlaying(), "Should report not playing at end of StopPlayback()");
  1062   mDecoder->UpdateStreamBlockingForStateMachinePlaying();
   1064   DispatchDecodeTasksIfNeeded();
   1065 }
   1067 void MediaDecoderStateMachine::SetSyncPointForMediaStream()
   1068 {
  1069   AssertCurrentThreadInMonitor();
  1071   DecodedStreamData* stream = mDecoder->GetDecodedStream();
  1072   if (!stream) {
   1073     return;
   1074   }
  1076   mSyncPointInMediaStream = stream->GetLastOutputTime();
   1077   mSyncPointInDecodedStream = mStartTime + mPlayDuration;
   1078 }
   1080 int64_t MediaDecoderStateMachine::GetCurrentTimeViaMediaStreamSync()
   1081 {
  1082   AssertCurrentThreadInMonitor();
  1083   NS_ASSERTION(mSyncPointInDecodedStream >= 0, "Should have set up sync point");
  1084   DecodedStreamData* stream = mDecoder->GetDecodedStream();
  1085   StreamTime streamDelta = stream->GetLastOutputTime() - mSyncPointInMediaStream;
   1086   return mSyncPointInDecodedStream + MediaTimeToMicroseconds(streamDelta);
   1087 }
   1089 void MediaDecoderStateMachine::StartPlayback()
   1090 {
  1091   DECODER_LOG(PR_LOG_DEBUG, "StartPlayback()");
  1093   NS_ASSERTION(!IsPlaying(), "Shouldn't be playing when StartPlayback() is called");
  1094   AssertCurrentThreadInMonitor();
  1096   mDecoder->NotifyPlaybackStarted();
  1097   mPlayStartTime = TimeStamp::Now();
  1099   NS_ASSERTION(IsPlaying(), "Should report playing by end of StartPlayback()");
  1100   if (NS_FAILED(StartAudioThread())) {
   1101     NS_WARNING("Failed to create audio thread");
   1102   }
  1103   mDecoder->GetReentrantMonitor().NotifyAll();
   1104   mDecoder->UpdateStreamBlockingForStateMachinePlaying();
   1105 }
   1107 void MediaDecoderStateMachine::UpdatePlaybackPositionInternal(int64_t aTime)
   1108 {
  1109   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1110                "Should be on state machine thread.");
  1111   AssertCurrentThreadInMonitor();
  1113   NS_ASSERTION(mStartTime >= 0, "Should have positive mStartTime");
  1114   mCurrentFrameTime = aTime - mStartTime;
  1115   NS_ASSERTION(mCurrentFrameTime >= 0, "CurrentTime should be positive!");
  1116   if (aTime > mEndTime) {
  1117     NS_ASSERTION(mCurrentFrameTime > GetDuration(),
  1118                  "CurrentTime must be after duration if aTime > endTime!");
  1119     mEndTime = aTime;
  1120     nsCOMPtr<nsIRunnable> event =
  1121       NS_NewRunnableMethod(mDecoder, &MediaDecoder::DurationChanged);
   1122     NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
   1123   }
   1124 }
   1126 void MediaDecoderStateMachine::UpdatePlaybackPosition(int64_t aTime)
   1127 {
  1128   UpdatePlaybackPositionInternal(aTime);
  1130   bool fragmentEnded = mFragmentEndTime >= 0 && GetMediaTime() >= mFragmentEndTime;
  1131   if (!mPositionChangeQueued || fragmentEnded) {
  1132     mPositionChangeQueued = true;
  1133     nsCOMPtr<nsIRunnable> event =
  1134       NS_NewRunnableMethod(mDecoder, &MediaDecoder::PlaybackPositionChanged);
   1135     NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
   1136   }
  1138   mMetadataManager.DispatchMetadataIfNeeded(mDecoder, aTime);
  1140   if (fragmentEnded) {
   1141     StopPlayback();
   1142   }
   1143 }
   1145 void MediaDecoderStateMachine::ClearPositionChangeFlag()
   1146 {
  1147   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1148   AssertCurrentThreadInMonitor();
   1150   mPositionChangeQueued = false;
   1151 }
   1153 MediaDecoderOwner::NextFrameStatus MediaDecoderStateMachine::GetNextFrameStatus()
   1154 {
  1155   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1156   if (IsBuffering() || IsSeeking()) {
  1157     return MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING;
  1158   } else if (HaveNextFrameData()) {
   1159     return MediaDecoderOwner::NEXT_FRAME_AVAILABLE;
   1160   }
   1161   return MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
   1162 }
   1164 void MediaDecoderStateMachine::SetVolume(double volume)
   1165 {
  1166   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1167   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   1168   mVolume = volume;
   1169 }
   1171 void MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured)
   1172 {
  1173   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1174   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1175   if (!mAudioCaptured && aCaptured && !mStopAudioThread) {
  1176     // Make sure the state machine runs as soon as possible. That will
  1177     // stop the audio thread.
  1178     // If mStopAudioThread is true then we're already stopping the audio thread
  1179     // and since we set mAudioCaptured to true, nothing can start it again.
   1180     ScheduleStateMachine();
   1181   }
   1182   mAudioCaptured = aCaptured;
   1183 }
   1185 double MediaDecoderStateMachine::GetCurrentTime() const
   1186 {
  1187   NS_ASSERTION(NS_IsMainThread() ||
  1188                OnStateMachineThread() ||
  1189                OnDecodeThread(),
  1190                "Should be on main, decode, or state machine thread.");
   1192   return static_cast<double>(mCurrentFrameTime) / static_cast<double>(USECS_PER_S);
   1193 }
   1195 int64_t MediaDecoderStateMachine::GetDuration()
   1196 {
  1197   AssertCurrentThreadInMonitor();
  1199   if (mEndTime == -1 || mStartTime == -1)
  1200     return -1;
   1201   return mEndTime - mStartTime;
   1202 }
   1204 void MediaDecoderStateMachine::SetDuration(int64_t aDuration)
   1205 {
  1206   NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(),
  1207                "Should be on main or decode thread.");
  1208   AssertCurrentThreadInMonitor();
  1210   if (aDuration == -1) {
   1211     return;
   1212   }
  1214   if (mStartTime != -1) {
  1215     mEndTime = mStartTime + aDuration;
  1216   } else {
  1217     mStartTime = 0;
   1218     mEndTime = aDuration;
   1219   }
   1220 }
   1222 void MediaDecoderStateMachine::UpdateEstimatedDuration(int64_t aDuration)
   1223 {
  1224   AssertCurrentThreadInMonitor();
  1225   int64_t duration = GetDuration();
  1226   if (aDuration != duration &&
  1227       abs(aDuration - duration) > ESTIMATED_DURATION_FUZZ_FACTOR_USECS) {
  1228     SetDuration(aDuration);
  1229     nsCOMPtr<nsIRunnable> event =
  1230       NS_NewRunnableMethod(mDecoder, &MediaDecoder::DurationChanged);
   1231     NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
   1232   }
   1233 }
   1235 void MediaDecoderStateMachine::SetMediaEndTime(int64_t aEndTime)
   1236 {
  1237   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread");
  1238   AssertCurrentThreadInMonitor();
   1240   mEndTime = aEndTime;
   1241 }
   1243 void MediaDecoderStateMachine::SetFragmentEndTime(int64_t aEndTime)
   1244 {
  1245   AssertCurrentThreadInMonitor();
   1247   mFragmentEndTime = aEndTime < 0 ? aEndTime : aEndTime + mStartTime;
   1248 }
   1250 void MediaDecoderStateMachine::SetTransportSeekable(bool aTransportSeekable)
   1251 {
  1252   NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(),
  1253       "Should be on main thread or the decoder thread.");
  1254   AssertCurrentThreadInMonitor();
   1256   mTransportSeekable = aTransportSeekable;
   1257 }
   1259 void MediaDecoderStateMachine::SetMediaSeekable(bool aMediaSeekable)
   1260 {
  1261   NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(),
  1262       "Should be on main thread or the decoder thread.");
   1264   mMediaSeekable = aMediaSeekable;
   1265 }
   1267 bool MediaDecoderStateMachine::IsDormantNeeded()
   1268 {
   1269   return mReader->IsDormantNeeded();
   1270 }
   1272 void MediaDecoderStateMachine::SetDormant(bool aDormant)
   1273 {
  1274   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1275   AssertCurrentThreadInMonitor();
  1277   if (!mReader) {
   1278     return;
   1279   }
  1281   if (aDormant) {
  1282     ScheduleStateMachine();
  1283     mState = DECODER_STATE_DORMANT;
  1284     mDecoder->GetReentrantMonitor().NotifyAll();
  1285   } else if ((aDormant != true) && (mState == DECODER_STATE_DORMANT)) {
  1286     ScheduleStateMachine();
  1287     mStartTime = 0;
  1288     mCurrentFrameTime = 0;
  1289     mState = DECODER_STATE_DECODING_METADATA;
   1290     mDecoder->GetReentrantMonitor().NotifyAll();
   1291   }
   1292 }
   1294 void MediaDecoderStateMachine::Shutdown()
   1295 {
  1296   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1298   // Once we've entered the shutdown state here there's no going back.
  1299   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1301   // Change state before issuing shutdown request to threads so those
  1302   // threads can start exiting cleanly during the Shutdown call.
  1303   DECODER_LOG(PR_LOG_DEBUG, "Changed state to SHUTDOWN");
  1304   ScheduleStateMachine();
  1305   mState = DECODER_STATE_SHUTDOWN;
   1306   mDecoder->GetReentrantMonitor().NotifyAll();
   1307 }
   1309 void MediaDecoderStateMachine::StartDecoding()
   1310 {
  1311   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1312                "Should be on state machine or decode thread.");
  1313   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1314   if (mState == DECODER_STATE_DECODING) {
   1315     return;
   1316   }
  1317   mState = DECODER_STATE_DECODING;
  1319   mDecodeStartTime = TimeStamp::Now();
  1321   // Reset our "stream finished decoding" flags, so we try to decode all
  1322   // streams that we have when we start decoding.
  1323   mIsVideoDecoding = HasVideo() && !VideoQueue().IsFinished();
  1324   mIsAudioDecoding = HasAudio() && !AudioQueue().IsFinished();
  1326   CheckIfDecodeComplete();
  1327   if (mState == DECODER_STATE_COMPLETED) {
   1328     return;
   1329   }
  1331   // Reset other state to pristine values before starting decode.
  1332   mSkipToNextKeyFrame = false;
  1333   mIsAudioPrerolling = true;
  1334   mIsVideoPrerolling = true;
  1336   // Ensure that we've got tasks enqueued to decode data if we need to.
  1337   DispatchDecodeTasksIfNeeded();
   1339   ScheduleStateMachine();
   1340 }
   1342 void MediaDecoderStateMachine::StartWaitForResources()
   1343 {
  1344   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1345                "Should be on state machine or decode thread.");
  1346   AssertCurrentThreadInMonitor();
   1347   mState = DECODER_STATE_WAIT_FOR_RESOURCES;
   1348 }
   1350 void MediaDecoderStateMachine::NotifyWaitingForResourcesStatusChanged()
   1351 {
  1352   AssertCurrentThreadInMonitor();
  1353   if (mState != DECODER_STATE_WAIT_FOR_RESOURCES ||
  1354       mReader->IsWaitingMediaResources()) {
   1355     return;
   1356   }
  1357   // The reader is no longer waiting for resources (say a hardware decoder),
  1358   // we can now proceed to decode metadata.
  1359   mState = DECODER_STATE_DECODING_METADATA;
   1360   EnqueueDecodeMetadataTask();
   1361 }
   1363 void MediaDecoderStateMachine::Play()
   1364 {
  1365   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1366   // When asked to play, switch to decoding state only if
  1367   // we are currently buffering. In other cases, we'll start playing anyway
  1368   // when the state machine notices the decoder's state change to PLAYING.
  1369   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1370   if (mState == DECODER_STATE_BUFFERING) {
  1371     DECODER_LOG(PR_LOG_DEBUG, "Changed state from BUFFERING to DECODING");
  1372     mState = DECODER_STATE_DECODING;
   1373     mDecodeStartTime = TimeStamp::Now();
   1374   }
  1375   // Once we start playing, we don't want to minimize our prerolling, as we
  1376   // assume the user is likely to want to keep playing in the future.
  1377   mMinimizePreroll = false;
  1378   ScheduleStateMachine();
  1381 void MediaDecoderStateMachine::ResetPlayback()
  1383   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  1384   mVideoFrameEndTime = -1;
  1385   mAudioStartTime = -1;
  1386   mAudioEndTime = -1;
  1387   mAudioCompleted = false;
  1390 void MediaDecoderStateMachine::NotifyDataArrived(const char* aBuffer,
  1391                                                      uint32_t aLength,
  1392                                                      int64_t aOffset)
  1394   NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
  1395   mReader->NotifyDataArrived(aBuffer, aLength, aOffset);
  1397   // While playing an unseekable stream of unknown duration, mEndTime is
  1398   // updated (in AdvanceFrame()) as we play. But if data is being downloaded
  1399   // faster than played, mEndTime won't reflect the end of playable data
  1400   // since we haven't played the frame at the end of buffered data. So update
  1401   // mEndTime here as new data is downloaded to prevent such a lag.
  1402   dom::TimeRanges buffered;
  1403   if (mDecoder->IsInfinite() &&
  1404       NS_SUCCEEDED(mDecoder->GetBuffered(&buffered)))
  1406     uint32_t length = 0;
  1407     buffered.GetLength(&length);
  1408     if (length) {
  1409       double end = 0;
  1410       buffered.End(length - 1, &end);
  1411       ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1412       mEndTime = std::max<int64_t>(mEndTime, end * USECS_PER_S);
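// A minimal standalone sketch of the end-time update above, assuming
// microsecond units (USECS_PER_S == 1000000) as elsewhere in this file.
// SketchUpdateEndTime is a hypothetical helper for illustration only.
static int64_t SketchUpdateEndTime(int64_t aCurrentEndTimeUsecs,
                                   double aBufferedEndSeconds)
{
  // Convert the buffered end position from seconds to microseconds, and never
  // let the known end time move backwards. E.g. a buffered end of 12.5s
  // raises an end time of 10,000,000us to 12,500,000us.
  const int64_t bufferedEndUsecs =
      static_cast<int64_t>(aBufferedEndSeconds * 1000000.0);
  return std::max<int64_t>(aCurrentEndTimeUsecs, bufferedEndUsecs);
}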
  1417 void MediaDecoderStateMachine::Seek(const SeekTarget& aTarget)
  1419   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1420   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1422   // We need to be able to seek at both the transport level and the media
  1423   // level in order to seek.
  1424   if (!mMediaSeekable) {
  1425     return;
  1427   // MediaDecoder::mPlayState should be SEEKING while we seek, and
  1428   // in that case MediaDecoder shouldn't be calling us.
  1429   NS_ASSERTION(mState != DECODER_STATE_SEEKING,
  1430                "We shouldn't already be seeking");
  1431   NS_ASSERTION(mState >= DECODER_STATE_DECODING,
  1432                "We should have loaded metadata");
  1434   // Bound the seek time to be inside the media range.
  1435   NS_ASSERTION(mStartTime != -1, "Should know start time by now");
  1436   NS_ASSERTION(mEndTime != -1, "Should know end time by now");
  1437   int64_t seekTime = aTarget.mTime + mStartTime;
  1438   seekTime = std::min(seekTime, mEndTime);
  1439   seekTime = std::max(mStartTime, seekTime);
  1440   NS_ASSERTION(seekTime >= mStartTime && seekTime <= mEndTime,
  1441                "Can only seek in range [0,duration]");
  1442   mSeekTarget = SeekTarget(seekTime, aTarget.mType);
  1444   mBasePosition = seekTime - mStartTime;
  1445   DECODER_LOG(PR_LOG_DEBUG, "Changed state to SEEKING (to %lld)", mSeekTarget.mTime);
  1446   mState = DECODER_STATE_SEEKING;
  1447   if (mDecoder->GetDecodedStream()) {
  1448     mDecoder->RecreateDecodedStream(seekTime - mStartTime);
  1450   ScheduleStateMachine();
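// A minimal standalone sketch of the seek-time clamping performed in Seek()
// above: the requested time is offset by the media start time and then bounded
// to [start, end]. All times are in microseconds; SketchClampSeekTime is a
// hypothetical helper for illustration only.
static int64_t SketchClampSeekTime(int64_t aRequestedUsecs,
                                   int64_t aStartTimeUsecs,
                                   int64_t aEndTimeUsecs)
{
  int64_t seekTime = aRequestedUsecs + aStartTimeUsecs;
  seekTime = std::min(seekTime, aEndTimeUsecs);
  seekTime = std::max(aStartTimeUsecs, seekTime);
  // With start=0 and end=10s, a request of 12s clamps to 10s and a negative
  // request clamps to 0, matching the "seek in range [0,duration]" assertion.
  return seekTime;
}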
  1453 void MediaDecoderStateMachine::StopAudioThread()
  1455   NS_ASSERTION(OnDecodeThread() ||
  1456                OnStateMachineThread(), "Should be on decode thread or state machine thread");
  1457   AssertCurrentThreadInMonitor();
  1459   if (mStopAudioThread) {
  1460     // Nothing to do, since the thread is already stopping
  1461     return;
  1464   mStopAudioThread = true;
  1465   mDecoder->GetReentrantMonitor().NotifyAll();
  1466   if (mAudioThread) {
  1467     DECODER_LOG(PR_LOG_DEBUG, "Shutdown audio thread");
  1469       ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1470       mAudioThread->Shutdown();
  1472     mAudioThread = nullptr;
  1473     // Now that the audio thread is dead, try sending data to our MediaStream(s).
  1474     // That may have been waiting for the audio thread to stop.
  1475     SendStreamData();
  1479 nsresult
  1480 MediaDecoderStateMachine::EnqueueDecodeMetadataTask()
  1482   AssertCurrentThreadInMonitor();
  1484   if (mState != DECODER_STATE_DECODING_METADATA) {
  1485     return NS_OK;
  1487   nsresult rv = mDecodeTaskQueue->Dispatch(
  1488     NS_NewRunnableMethod(this, &MediaDecoderStateMachine::CallDecodeMetadata));
  1489   NS_ENSURE_SUCCESS(rv, rv);
  1491   return NS_OK;
  1494 void
  1495 MediaDecoderStateMachine::EnsureActive()
  1497   AssertCurrentThreadInMonitor();
  1498   MOZ_ASSERT(OnDecodeThread());
  1499   if (!mIsReaderIdle) {
  1500     return;
  1502   mIsReaderIdle = false;
  1504     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1505     SetReaderActive();
  1509 void
  1510 MediaDecoderStateMachine::SetReaderIdle()
  1512 #ifdef PR_LOGGING
  1514     ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1515     DECODER_LOG(PR_LOG_DEBUG, "SetReaderIdle() audioQueue=%lld videoQueue=%lld",
  1516                 GetDecodedAudioDuration(),
  1517                 VideoQueue().Duration());
  1519 #endif
  1520   MOZ_ASSERT(OnDecodeThread());
  1521   mReader->SetIdle();
  1524 void
  1525 MediaDecoderStateMachine::SetReaderActive()
  1527   DECODER_LOG(PR_LOG_DEBUG, "SetReaderActive()");
  1528   MOZ_ASSERT(OnDecodeThread());
  1529   mReader->SetActive();
  1532 void
  1533 MediaDecoderStateMachine::DispatchDecodeTasksIfNeeded()
  1535   AssertCurrentThreadInMonitor();
  1537   // NeedToDecodeAudio() can go from false to true while we hold the
  1538   // monitor, but it can't go from true to false. This is because
  1539   // NeedToDecodeAudio() takes into account the amount of decoded audio
  1540   // that's been written to the AudioStream but not yet played. If we
  1541   // called NeedToDecodeAudio() twice and a thread context switch happened
  1542   // between the calls, audio could play in the meantime, changing the
  1543   // return value of NeedToDecodeAudio() and giving inconsistent results.
  1544   // So we cache the value returned by NeedToDecodeAudio() and make
  1545   // decisions based on the cached value. If NeedToDecodeAudio() returns
  1546   // false here and subsequently becomes true while we're not playing,
  1547   // that's probably OK, since we don't need to consume data anyway.
  1548   // (See the standalone sketch after this function.)
  1550   const bool needToDecodeAudio = NeedToDecodeAudio();
  1551   const bool needToDecodeVideo = NeedToDecodeVideo();
  1553   // If we're in completed state, we should not need to decode anything else.
  1554   MOZ_ASSERT(mState != DECODER_STATE_COMPLETED ||
  1555              (!needToDecodeAudio && !needToDecodeVideo));
  1557   bool needIdle = !mDecoder->IsLogicallyPlaying() &&
  1558                   mState != DECODER_STATE_SEEKING &&
  1559                   !needToDecodeAudio &&
  1560                   !needToDecodeVideo &&
  1561                   !IsPlaying();
  1563   if (needToDecodeAudio) {
  1564     EnsureAudioDecodeTaskQueued();
  1566   if (needToDecodeVideo) {
  1567     EnsureVideoDecodeTaskQueued();
  1570   if (mIsReaderIdle == needIdle) {
  1571     return;
  1573   mIsReaderIdle = needIdle;
  1574   RefPtr<nsIRunnable> event;
  1575   if (mIsReaderIdle) {
  1576     event = NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SetReaderIdle);
  1577   } else {
  1578     event = NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SetReaderActive);
  1580   if (NS_FAILED(mDecodeTaskQueue->Dispatch(event.forget())) &&
  1581       mState != DECODER_STATE_SHUTDOWN) {
  1582     NS_WARNING("Failed to dispatch event to set decoder idle state");
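// A standalone sketch of the "sample once, then decide" pattern described at
// the top of DispatchDecodeTasksIfNeeded(): predicates whose values can change
// underneath us are sampled into locals, and every later decision uses the
// cached samples. The Sketch* names are hypothetical and for illustration only.
struct SketchDecodeNeeds
{
  bool mNeedAudio;
  bool mNeedVideo;
  bool mNeedIdle;
};

static SketchDecodeNeeds
SketchSampleDecodeNeeds(bool aNeedToDecodeAudio, bool aNeedToDecodeVideo,
                        bool aLogicallyPlaying, bool aSeeking, bool aPlaying)
{
  SketchDecodeNeeds needs;
  // Sample the volatile predicates exactly once...
  needs.mNeedAudio = aNeedToDecodeAudio;
  needs.mNeedVideo = aNeedToDecodeVideo;
  // ...and derive the idle decision from the same samples, so the two can't
  // disagree the way two separate NeedToDecodeAudio() calls could.
  needs.mNeedIdle = !aLogicallyPlaying && !aSeeking &&
                    !needs.mNeedAudio && !needs.mNeedVideo && !aPlaying;
  return needs;
}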
  1586 nsresult
  1587 MediaDecoderStateMachine::EnqueueDecodeSeekTask()
  1589   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1590                "Should be on state machine or decode thread.");
  1591   AssertCurrentThreadInMonitor();
  1593   if (mState != DECODER_STATE_SEEKING) {
  1594     return NS_OK;
  1596   nsresult rv = mDecodeTaskQueue->Dispatch(
  1597     NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeSeek));
  1598   NS_ENSURE_SUCCESS(rv, rv);
  1600   return NS_OK;
  1603 nsresult
  1604 MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded()
  1606   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1607   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1608                "Should be on state machine or decode thread.");
  1610   if (NeedToDecodeAudio()) {
  1611     return EnsureAudioDecodeTaskQueued();
  1614   return NS_OK;
  1617 nsresult
  1618 MediaDecoderStateMachine::EnsureAudioDecodeTaskQueued()
  1620   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1621   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1622                "Should be on state machine or decode thread.");
  1624   if (mState >= DECODER_STATE_COMPLETED) {
  1625     return NS_OK;
  1628   MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA);
  1630   if (mIsAudioDecoding && !mDispatchedAudioDecodeTask) {
  1631     nsresult rv = mDecodeTaskQueue->Dispatch(
  1632       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeAudio));
  1633     if (NS_SUCCEEDED(rv)) {
  1634       mDispatchedAudioDecodeTask = true;
  1635     } else {
  1636       NS_WARNING("Failed to dispatch task to decode audio");
  1640   return NS_OK;
  1643 nsresult
  1644 MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded()
  1646   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1647   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1648                "Should be on state machine or decode thread.");
  1650   if (NeedToDecodeVideo()) {
  1651     return EnsureVideoDecodeTaskQueued();
  1654   return NS_OK;
  1657 nsresult
  1658 MediaDecoderStateMachine::EnsureVideoDecodeTaskQueued()
  1660   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1661   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1662                "Should be on state machine or decode thread.");
  1664   if (mState >= DECODER_STATE_COMPLETED) {
  1665     return NS_OK;
  1668   MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA);
  1670   if (mIsVideoDecoding && !mDispatchedVideoDecodeTask) {
  1671     nsresult rv = mDecodeTaskQueue->Dispatch(
  1672       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeVideo));
  1673     if (NS_SUCCEEDED(rv)) {
  1674       mDispatchedVideoDecodeTask = true;
  1675     } else {
  1676       NS_WARNING("Failed to dispatch task to decode video");
  1680   return NS_OK;
  1683 nsresult
  1684 MediaDecoderStateMachine::StartAudioThread()
  1686   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1687                "Should be on state machine or decode thread.");
  1688   AssertCurrentThreadInMonitor();
  1689   if (mAudioCaptured) {
  1690     NS_ASSERTION(mStopAudioThread, "mStopAudioThread must always be true if audio is captured");
  1691     return NS_OK;
  1694   mStopAudioThread = false;
  1695   if (HasAudio() && !mAudioThread) {
  1696     nsresult rv = NS_NewNamedThread("Media Audio",
  1697                                     getter_AddRefs(mAudioThread),
  1698                                     nullptr,
  1699                                     MEDIA_THREAD_STACK_SIZE);
  1700     if (NS_FAILED(rv)) {
  1701       DECODER_LOG(PR_LOG_WARNING, "Changed state to SHUTDOWN because failed to create audio thread");
  1702       mState = DECODER_STATE_SHUTDOWN;
  1703       return rv;
  1706     nsCOMPtr<nsIRunnable> event =
  1707       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::AudioLoop);
  1708     mAudioThread->Dispatch(event, NS_DISPATCH_NORMAL);
  1710   return NS_OK;
  1713 int64_t MediaDecoderStateMachine::AudioDecodedUsecs()
  1715   NS_ASSERTION(HasAudio(),
  1716                "Should only call AudioDecodedUsecs() when we have audio");
  1717   // The amount of audio we have decoded is the amount of audio data we've
  1718   // already decoded and pushed to the hardware, plus the amount of audio
  1719   // data waiting to be pushed to the hardware.
  1720   int64_t pushed = (mAudioEndTime != -1) ? (mAudioEndTime - GetMediaTime()) : 0;
  1721   return pushed + AudioQueue().Duration();
  1724 bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs)
  1726   AssertCurrentThreadInMonitor();
  1727   // We consider ourselves low on decoded data if we're low on audio,
  1728   // provided we've not decoded to the end of the audio stream, or
  1729   // if we're low on video frames, provided
  1730   // we've not decoded to the end of the video stream.
  1731   return ((HasAudio() &&
  1732            !AudioQueue().IsFinished() &&
  1733            AudioDecodedUsecs() < aAudioUsecs)
  1734           ||
  1735          (HasVideo() &&
  1736           !VideoQueue().IsFinished() &&
  1737           static_cast<uint32_t>(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
  1740 bool MediaDecoderStateMachine::HasLowUndecodedData()
  1742   return HasLowUndecodedData(mLowDataThresholdUsecs);
  1745 bool MediaDecoderStateMachine::HasLowUndecodedData(double aUsecs)
  1747   AssertCurrentThreadInMonitor();
  1748   NS_ASSERTION(mState > DECODER_STATE_DECODING_METADATA,
  1749                "Must have loaded metadata for GetBuffered() to work");
  1751   bool reliable;
  1752   double bytesPerSecond = mDecoder->ComputePlaybackRate(&reliable);
  1753   if (!reliable) {
  1754     // Default to assuming we have enough
  1755     return false;
  1758   MediaResource* stream = mDecoder->GetResource();
  1759   int64_t currentPos = stream->Tell();
  1760   int64_t requiredPos = currentPos + int64_t((aUsecs/1000000.0)*bytesPerSecond);
  1761   int64_t length = stream->GetLength();
  1762   if (length >= 0) {
  1763     requiredPos = std::min(requiredPos, length);
  1766   return stream->GetCachedDataEnd(currentPos) < requiredPos;
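// A standalone sketch of the check in HasLowUndecodedData() above: convert the
// desired playback margin from microseconds to a byte position using the
// estimated playback rate, clamp it to the resource length, and compare with
// how far ahead the cache currently extends. The Sketch* names are
// hypothetical and for illustration only.
static bool SketchHasLowUndecodedData(double aMarginUsecs,
                                      double aBytesPerSecond,
                                      int64_t aCurrentPos,
                                      int64_t aCachedDataEnd,
                                      int64_t aResourceLength)
{
  int64_t requiredPos =
      aCurrentPos + int64_t((aMarginUsecs / 1000000.0) * aBytesPerSecond);
  if (aResourceLength >= 0) {
    requiredPos = std::min(requiredPos, aResourceLength);
  }
  // We're "low" if the cache doesn't reach the position we'd need in order to
  // keep playing for aMarginUsecs at the estimated rate.
  return aCachedDataEnd < requiredPos;
}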
  1769 void
  1770 MediaDecoderStateMachine::DecodeError()
  1772   AssertCurrentThreadInMonitor();
  1773   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  1775   // Change state to shutdown before sending error report to MediaDecoder
  1776   // and the HTMLMediaElement, so that our pipeline can start exiting
  1777   // cleanly during the sync dispatch below.
  1778   DECODER_LOG(PR_LOG_WARNING, "Decode error, changed state to SHUTDOWN");
  1779   ScheduleStateMachine();
  1780   mState = DECODER_STATE_SHUTDOWN;
  1781   mDecoder->GetReentrantMonitor().NotifyAll();
  1783   // Dispatch the event to call DecodeError synchronously. This ensures
  1784   // we're in shutdown state by the time we exit the decode thread.
  1785   // If we just moved to shutdown state here on the decode thread, we may
  1786   // cause the state machine to shutdown/free memory without closing its
  1787   // media stream properly, and we'll get callbacks from the media stream
  1788   // causing a crash.
  1790     nsCOMPtr<nsIRunnable> event =
  1791       NS_NewRunnableMethod(mDecoder, &MediaDecoder::DecodeError);
  1792     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1793     NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
  1797 void
  1798 MediaDecoderStateMachine::CallDecodeMetadata()
  1800   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1801   if (mState != DECODER_STATE_DECODING_METADATA) {
  1802     return;
  1804   if (NS_FAILED(DecodeMetadata())) {
  1805     DECODER_LOG(PR_LOG_WARNING, "Decode metadata failed, shutting down decoder");
  1806     DecodeError();
  1810 nsresult MediaDecoderStateMachine::DecodeMetadata()
  1812   AssertCurrentThreadInMonitor();
  1813   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  1814   DECODER_LOG(PR_LOG_DEBUG, "Decoding Media Headers");
  1815   if (mState != DECODER_STATE_DECODING_METADATA) {
  1816     return NS_ERROR_FAILURE;
  1818   EnsureActive();
  1820   nsresult res;
  1821   MediaInfo info;
  1822   MetadataTags* tags;
  1824     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1825     res = mReader->ReadMetadata(&info, &tags);
  1827   if (NS_SUCCEEDED(res) &&
  1828       mState == DECODER_STATE_DECODING_METADATA &&
  1829       mReader->IsWaitingMediaResources()) {
  1830     // change state to DECODER_STATE_WAIT_FOR_RESOURCES
  1831     StartWaitForResources();
  1832     return NS_OK;
  1835   mInfo = info;
  1837   if (NS_FAILED(res) || (!info.HasValidMedia())) {
  1838     return NS_ERROR_FAILURE;
  1840   mDecoder->StartProgressUpdates();
  1841   mGotDurationFromMetaData = (GetDuration() != -1);
  1843   VideoData* videoData = FindStartTime();
  1844   if (videoData) {
  1845     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1846     RenderVideoFrame(videoData, TimeStamp::Now());
  1849   if (mState == DECODER_STATE_SHUTDOWN) {
  1850     return NS_ERROR_FAILURE;
  1853   NS_ASSERTION(mStartTime != -1, "Must have start time");
  1854   MOZ_ASSERT((!HasVideo() && !HasAudio()) ||
  1855               !(mMediaSeekable && mTransportSeekable) || mEndTime != -1,
  1856               "Active seekable media should have end time");
  1857   MOZ_ASSERT(!(mMediaSeekable && mTransportSeekable) ||
  1858              GetDuration() != -1, "Seekable media should have duration");
  1859   DECODER_LOG(PR_LOG_DEBUG, "Media goes from %lld to %lld (duration %lld) "
  1860               "transportSeekable=%d, mediaSeekable=%d",
  1861               mStartTime, mEndTime, GetDuration(), mTransportSeekable, mMediaSeekable);
  1863   if (HasAudio() && !HasVideo()) {
  1864     // We're playing audio only. We don't need to worry about slow video
  1865     // decodes causing audio underruns, so don't buffer so much audio in
  1866     // order to reduce memory usage.
  1867     mAmpleAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
  1868     mLowAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
  1871   // Inform the element that we've loaded the metadata and the first frame.
  1872   nsCOMPtr<nsIRunnable> metadataLoadedEvent =
  1873     new AudioMetadataEventRunner(mDecoder,
  1874                                  mInfo.mAudio.mChannels,
  1875                                  mInfo.mAudio.mRate,
  1876                                  HasAudio(),
  1877                                  HasVideo(),
  1878                                  tags);
  1879   NS_DispatchToMainThread(metadataLoadedEvent, NS_DISPATCH_NORMAL);
  1881   if (HasAudio()) {
  1882     RefPtr<nsIRunnable> decodeTask(
  1883       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded));
  1884     AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
  1886   if (HasVideo()) {
  1887     RefPtr<nsIRunnable> decodeTask(
  1888       NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded));
  1889     VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
  1892   if (mState == DECODER_STATE_DECODING_METADATA) {
  1893     DECODER_LOG(PR_LOG_DEBUG, "Changed state from DECODING_METADATA to DECODING");
  1894     StartDecoding();
  1897   // For very short media, FindStartTime() can decode the entire media.
  1898   // So we need to check if this has occurred, else our decode pipeline won't
  1899   // run (since it doesn't need to) and we won't detect end of stream.
  1900   CheckIfDecodeComplete();
  1902   if ((mState == DECODER_STATE_DECODING || mState == DECODER_STATE_COMPLETED) &&
  1903       mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
  1904       !IsPlaying())
  1906     StartPlayback();
  1909   return NS_OK;
  1912 void MediaDecoderStateMachine::DecodeSeek()
  1914   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1915   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  1916   if (mState != DECODER_STATE_SEEKING) {
  1917     return;
  1919   EnsureActive();
  1921   // During the seek, don't hold the lock on the decoder state;
  1922   // otherwise long seek operations can block the main thread.
  1923   // The events dispatched to the main thread are SYNC calls.
  1924   // These calls are made outside of the decode monitor lock, so
  1925   // it is safe for the main thread to make calls that acquire
  1926   // the lock without deadlocking. We check the state when we
  1927   // reacquire the lock, in case shutdown occurred while we
  1928   // didn't hold the lock.
  1929   int64_t seekTime = mSeekTarget.mTime;
  1930   mDecoder->StopProgressUpdates();
  1932   bool currentTimeChanged = false;
  1933   const int64_t mediaTime = GetMediaTime();
  1934   if (mediaTime != seekTime) {
  1935     currentTimeChanged = true;
  1936     // Stop playback now to ensure that while we're outside the monitor
  1937     // dispatching SeekingStarted, playback doesn't advance and clobber
  1938     // the mCurrentFrameTime that we're setting to seekTime here.
  1939     StopPlayback();
  1940     UpdatePlaybackPositionInternal(seekTime);
  1943   // SeekingStarted will trigger an UpdateReadyStateForData call, which
  1944   // will inform the element and its users that we have no frames
  1945   // to display.
  1947     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1948     nsCOMPtr<nsIRunnable> startEvent =
  1949       NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStarted);
  1950     NS_DispatchToMainThread(startEvent, NS_DISPATCH_SYNC);
  1953   int64_t newCurrentTime = seekTime;
  1954   if (currentTimeChanged) {
  1955     // The seek target is different from the current playback position,
  1956     // so we'll need to seek the playback position; shut down our decode
  1957     // and audio threads.
  1958     StopAudioThread();
  1959     ResetPlayback();
  1960     nsresult res;
  1962       ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1963       // Now perform the seek. We must not hold the state machine monitor
  1964       // while we seek, since the seek reads data and could block on I/O.
  1965       res = mReader->Seek(seekTime,
  1966                           mStartTime,
  1967                           mEndTime,
  1968                           mediaTime);
  1970       if (NS_SUCCEEDED(res) && mSeekTarget.mType == SeekTarget::Accurate) {
  1971         res = mReader->DecodeToTarget(seekTime);
  1975     if (NS_SUCCEEDED(res)) {
  1976       int64_t nextSampleStartTime = 0;
  1977       VideoData* video = nullptr;
  1979         ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1980         video = mReader->FindStartTime(nextSampleStartTime);
  1983       // Setup timestamp state.
  1984       if (seekTime == mEndTime) {
  1985         newCurrentTime = mAudioStartTime = seekTime;
  1986       } else if (HasAudio()) {
  1987         AudioData* audio = AudioQueue().PeekFront();
  1988         newCurrentTime = mAudioStartTime = audio ? audio->mTime : seekTime;
  1989       } else {
  1990         newCurrentTime = video ? video->mTime : seekTime;
  1992       mPlayDuration = newCurrentTime - mStartTime;
  1994       if (HasVideo()) {
  1995         if (video) {
  1997             ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1998             RenderVideoFrame(video, TimeStamp::Now());
  2000           nsCOMPtr<nsIRunnable> event =
  2001             NS_NewRunnableMethod(mDecoder, &MediaDecoder::Invalidate);
  2002           NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
  2005     } else {
  2006       DecodeError();
  2009   mDecoder->StartProgressUpdates();
  2010   if (mState == DECODER_STATE_DECODING_METADATA ||
  2011       mState == DECODER_STATE_DORMANT ||
  2012       mState == DECODER_STATE_SHUTDOWN) {
  2013     return;
  2016   // Change state to DECODING or COMPLETED now. SeekingStopped will
  2017   // call MediaDecoderStateMachine::Seek to reset our state to SEEKING
  2018   // if we need to seek again.
  2020   nsCOMPtr<nsIRunnable> stopEvent;
  2021   bool isLiveStream = mDecoder->GetResource()->GetLength() == -1;
  2022   if (GetMediaTime() == mEndTime && !isLiveStream) {
  2023     // Seeked to end of media, move to COMPLETED state. Note we don't do
  2024     // this if we're playing a live stream, since the end of media will advance
  2025     // once we download more data!
  2026     DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to COMPLETED", seekTime);
  2027     stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStoppedAtEnd);
  2028     // Explicitly set our state so we don't decode further, and so
  2029     // we report playback ended to the media element.
  2030     mState = DECODER_STATE_COMPLETED;
  2031     mIsAudioDecoding = false;
  2032     mIsVideoDecoding = false;
  2033     DispatchDecodeTasksIfNeeded();
  2034   } else {
  2035     DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to DECODING", seekTime);
  2036     stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStopped);
  2037     StartDecoding();
  2040   if (newCurrentTime != mediaTime) {
  2041     UpdatePlaybackPositionInternal(newCurrentTime);
  2042     if (mDecoder->GetDecodedStream()) {
  2043       SetSyncPointForMediaStream();
  2047   // Try to decode another frame to detect if we're at the end...
  2048   DECODER_LOG(PR_LOG_DEBUG, "Seek completed, mCurrentFrameTime=%lld", mCurrentFrameTime);
  2051     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  2052     NS_DispatchToMainThread(stopEvent, NS_DISPATCH_SYNC);
  2055   // Reset quick buffering status. This ensures that if we began the
  2056   // seek while quick-buffering, we won't bypass quick buffering mode
  2057   // if we need to buffer after the seek.
  2058   mQuickBuffering = false;
  2060   ScheduleStateMachine();
  2063 // Runnable to dispose of the decoder and state machine on the main thread.
  2064 class nsDecoderDisposeEvent : public nsRunnable {
  2065 public:
  2066   nsDecoderDisposeEvent(already_AddRefed<MediaDecoder> aDecoder,
  2067                         already_AddRefed<MediaDecoderStateMachine> aStateMachine)
  2068     : mDecoder(aDecoder), mStateMachine(aStateMachine) {}
  2069   NS_IMETHOD Run() {
  2070     NS_ASSERTION(NS_IsMainThread(), "Must be on main thread.");
  2071     mStateMachine->ReleaseDecoder();
  2072     mDecoder->ReleaseStateMachine();
  2073     mStateMachine = nullptr;
  2074     mDecoder = nullptr;
  2075     return NS_OK;
  2077 private:
  2078   nsRefPtr<MediaDecoder> mDecoder;
  2079   nsRefPtr<MediaDecoderStateMachine> mStateMachine;
  2080 };
  2082 // Runnable which dispatches an event to the main thread to dispose of the
  2083 // decoder and state machine. This runs on the state machine thread after
  2084 // the state machine has shutdown, and all events for that state machine have
  2085 // finished running.
  2086 class nsDispatchDisposeEvent : public nsRunnable {
  2087 public:
  2088   nsDispatchDisposeEvent(MediaDecoder* aDecoder,
  2089                          MediaDecoderStateMachine* aStateMachine)
  2090     : mDecoder(aDecoder), mStateMachine(aStateMachine) {}
  2091   NS_IMETHOD Run() {
  2092     NS_DispatchToMainThread(new nsDecoderDisposeEvent(mDecoder.forget(),
  2093                                                       mStateMachine.forget()));
  2094     return NS_OK;
  2096 private:
  2097   nsRefPtr<MediaDecoder> mDecoder;
  2098   nsRefPtr<MediaDecoderStateMachine> mStateMachine;
  2099 };
  2101 nsresult MediaDecoderStateMachine::RunStateMachine()
  2103   AssertCurrentThreadInMonitor();
  2105   MediaResource* resource = mDecoder->GetResource();
  2106   NS_ENSURE_TRUE(resource, NS_ERROR_NULL_POINTER);
  2108   switch (mState) {
  2109     case DECODER_STATE_SHUTDOWN: {
  2110       if (IsPlaying()) {
  2111         StopPlayback();
  2113       StopAudioThread();
  2114       // If mAudioThread is non-null after StopAudioThread completes, we are
  2115       // running in a nested event loop waiting for Shutdown() on
  2116       // mAudioThread to complete.  Return to the event loop and let it
  2117       // finish processing before continuing with shutdown.
  2118       if (mAudioThread) {
  2119         MOZ_ASSERT(mStopAudioThread);
  2120         return NS_OK;
  2123       // The reader's listeners hold references to the state machine,
  2124       // creating a cycle which keeps the state machine and its shared
  2125       // thread pools alive. So break it here.
  2126       AudioQueue().ClearListeners();
  2127       VideoQueue().ClearListeners();
  2130         ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  2131         // Wait for the decode task queue to finish shutting down.
  2132         mDecodeTaskQueue->Shutdown();
  2133         mDecodeTaskQueue = nullptr;
  2134         mReader->ReleaseMediaResources();
  2136       // Now that those threads are stopped, there's no possibility of
  2137       // mPendingWakeDecoder being needed again. Revoke it.
  2138       mPendingWakeDecoder = nullptr;
  2140       MOZ_ASSERT(mState == DECODER_STATE_SHUTDOWN,
  2141                  "How did we escape from the shutdown state?");
  2142       // We must daisy-chain these events to destroy the decoder. We must
  2143       // destroy the decoder on the main thread, but we can't destroy the
  2144       // decoder while this thread holds the decoder monitor. We can't
  2145       // dispatch an event to the main thread to destroy the decoder from
  2146       // here, as the event may run before the dispatch returns, and we
  2147       // hold the decoder monitor here. We also want to guarantee that the
  2148       // state machine is destroyed on the main thread, and so the
  2149       // event runner running this function (which holds a reference to the
  2150       // state machine) needs to finish and be released in order to allow
  2151       // that. So we dispatch an event to run after this event runner has
  2152       // finished and released its monitor/references. That event then will
  2153       // dispatch an event to the main thread to release the decoder and
  2154       // state machine.
  2155       GetStateMachineThread()->Dispatch(
  2156         new nsDispatchDisposeEvent(mDecoder, this), NS_DISPATCH_NORMAL);
  2158       mTimer->Cancel();
  2159       mTimer = nullptr;
  2160       return NS_OK;
  2163     case DECODER_STATE_DORMANT: {
  2164       if (IsPlaying()) {
  2165         StopPlayback();
  2167       StopAudioThread();
  2168       // Now that those threads are stopped, there's no possibility of
  2169       // mPendingWakeDecoder being needed again. Revoke it.
  2170       mPendingWakeDecoder = nullptr;
  2172         ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  2173         // Wait for any in-flight decode tasks to complete.
  2174         mDecodeTaskQueue->AwaitIdle();
  2175         mReader->ReleaseMediaResources();
  2177       return NS_OK;
  2180     case DECODER_STATE_WAIT_FOR_RESOURCES: {
  2181       return NS_OK;
  2184     case DECODER_STATE_DECODING_METADATA: {
  2185       // Ensure we have a decode thread to decode metadata.
  2186       return EnqueueDecodeMetadataTask();
  2189     case DECODER_STATE_DECODING: {
  2190       if (mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING &&
  2191           IsPlaying())
  2193         // We're playing, but the element/decoder is in paused state. Stop
  2194         // playing!
  2195         StopPlayback();
  2198       if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
  2199           !IsPlaying()) {
  2200         // We are playing, but the state machine does not know it yet. Tell it
  2201         // that it is, so that the clock can be properly queried.
  2202         StartPlayback();
  2205       AdvanceFrame();
  2206       NS_ASSERTION(mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING ||
  2207                    IsStateMachineScheduled() ||
  2208                    mPlaybackRate == 0.0, "Must have timer scheduled");
  2209       return NS_OK;
  2212     case DECODER_STATE_BUFFERING: {
  2213       TimeStamp now = TimeStamp::Now();
  2214       NS_ASSERTION(!mBufferingStart.IsNull(), "Must know buffering start time.");
  2216       // We will remain in the buffering state if we've not decoded enough
  2217       // data to begin playback, or if we've not downloaded a reasonable
  2218       // amount of data inside our buffering time.
  2219       TimeDuration elapsed = now - mBufferingStart;
  2220       bool isLiveStream = resource->GetLength() == -1;
  2221       if ((isLiveStream || !mDecoder->CanPlayThrough()) &&
  2222             elapsed < TimeDuration::FromSeconds(mBufferingWait * mPlaybackRate) &&
  2223             (mQuickBuffering ? HasLowDecodedData(QUICK_BUFFERING_LOW_DATA_USECS)
  2224                             : HasLowUndecodedData(mBufferingWait * USECS_PER_S)) &&
  2225             !mDecoder->IsDataCachedToEndOfResource() &&
  2226             !resource->IsSuspended())
  2228         DECODER_LOG(PR_LOG_DEBUG, "Buffering: wait %ds, timeout in %.3lfs %s",
  2229                     mBufferingWait, mBufferingWait - elapsed.ToSeconds(),
  2230                     (mQuickBuffering ? "(quick exit)" : ""));
  2231         ScheduleStateMachine(USECS_PER_S);
  2232         return NS_OK;
  2233       } else {
  2234         DECODER_LOG(PR_LOG_DEBUG, "Changed state from BUFFERING to DECODING");
  2235         DECODER_LOG(PR_LOG_DEBUG, "Buffered for %.3lfs", (now - mBufferingStart).ToSeconds());
  2236         StartDecoding();
  2239       // Notify to allow blocked decoder thread to continue
  2240       mDecoder->GetReentrantMonitor().NotifyAll();
  2241       UpdateReadyState();
  2242       if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
  2243           !IsPlaying())
  2245         StartPlayback();
  2247       NS_ASSERTION(IsStateMachineScheduled(), "Must have timer scheduled");
  2248       return NS_OK;
  2251     case DECODER_STATE_SEEKING: {
  2252       // Ensure we have a decode thread to perform the seek.
  2253      return EnqueueDecodeSeekTask();
  2256     case DECODER_STATE_COMPLETED: {
  2257       // Play the remaining media. We want to run AdvanceFrame() at least
  2258       // once to ensure the current playback position is advanced to the
  2259       // end of the media, and so that we update the readyState.
  2260       if (VideoQueue().GetSize() > 0 ||
  2261           (HasAudio() && !mAudioCompleted) ||
  2262           (mDecoder->GetDecodedStream() && !mDecoder->GetDecodedStream()->IsFinished()))
  2264         AdvanceFrame();
  2265         NS_ASSERTION(mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING ||
  2266                      mPlaybackRate == 0 ||
  2267                      IsStateMachineScheduled(),
  2268                      "Must have timer scheduled");
  2269         return NS_OK;
  2272       // Call StopPlayback() to reset the IsPlaying() state so audio
  2273       // is restarted correctly.
  2274       StopPlayback();
  2276       if (mState != DECODER_STATE_COMPLETED) {
  2277         // While we're presenting a frame we can change state. Whatever changed
  2278         // our state should have scheduled another state machine run.
  2279         NS_ASSERTION(IsStateMachineScheduled(), "Must have timer scheduled");
  2280         return NS_OK;
  2283       StopAudioThread();
  2284       // When we're decoding to a stream, the stream's main-thread finish signal
  2285       // will take care of calling MediaDecoder::PlaybackEnded.
  2286       if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
  2287           !mDecoder->GetDecodedStream()) {
  2288         int64_t videoTime = HasVideo() ? mVideoFrameEndTime : 0;
  2289         int64_t clockTime = std::max(mEndTime, std::max(videoTime, GetAudioClock()));
  2290         UpdatePlaybackPosition(clockTime);
  2293           // Wait for the state change to complete on the main thread;
  2294           // otherwise we might see |mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING|
  2295           // in the next loop and send |MediaDecoder::PlaybackEnded| again, firing the
  2296           // 'ended' event twice in the media element.
  2297           ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  2298           nsCOMPtr<nsIRunnable> event =
  2299             NS_NewRunnableMethod(mDecoder, &MediaDecoder::PlaybackEnded);
  2300           NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
  2303       return NS_OK;
  2307   return NS_OK;
  2310 void MediaDecoderStateMachine::RenderVideoFrame(VideoData* aData,
  2311                                                 TimeStamp aTarget)
  2313   NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  2314                "Should be on state machine or decode thread.");
  2315   mDecoder->GetReentrantMonitor().AssertNotCurrentThreadIn();
  2317   if (aData->mDuplicate) {
  2318     return;
  2321   VERBOSE_LOG("playing video frame %lld", aData->mTime);
  2323   VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
  2324   if (container) {
  2325     container->SetCurrentFrame(ThebesIntSize(aData->mDisplay), aData->mImage,
  2326                                aTarget);
  2330 int64_t
  2331 MediaDecoderStateMachine::GetAudioClock()
  2333   // We must hold the decoder monitor while using the audio stream off the
  2334   // audio thread to ensure that it doesn't get destroyed on the audio thread
  2335   // while we're using it.
  2336   AssertCurrentThreadInMonitor();
  2337   if (!HasAudio() || mAudioCaptured)
  2338     return -1;
  2339   if (!mAudioStream) {
  2340     // Audio thread hasn't played any data yet.
  2341     return mAudioStartTime;
  2343   int64_t t = mAudioStream->GetPosition();
  2344   return (t == -1) ? -1 : t + mAudioStartTime;
  2347 int64_t MediaDecoderStateMachine::GetVideoStreamPosition()
  2349   AssertCurrentThreadInMonitor();
  2351   if (!IsPlaying()) {
  2352     return mPlayDuration + mStartTime;
  2355   // The playbackRate has just been changed; reset the play start time.
  2356   if (mResetPlayStartTime) {
  2357     mPlayStartTime = TimeStamp::Now();
  2358     mResetPlayStartTime = false;
  2361   int64_t pos = DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration;
  2362   pos -= mBasePosition;
  2363   NS_ASSERTION(pos >= 0, "Video stream position should be positive.");
  2364   return mBasePosition + pos * mPlaybackRate + mStartTime;
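// A simplified sketch of the position formula in GetVideoStreamPosition():
// the wall-clock time elapsed since playback (re)started is scaled by the
// playback rate relative to the base position recorded at the last rate
// change. Times are in microseconds; SketchVideoStreamPosition and its
// parameters are hypothetical stand-ins for the member state above.
static int64_t SketchVideoStreamPosition(int64_t aUsecsSincePlayStart,
                                         int64_t aPlayDurationUsecs,
                                         int64_t aBasePositionUsecs,
                                         double aPlaybackRate,
                                         int64_t aStartTimeUsecs)
{
  // Un-scaled progress measured from the base position...
  int64_t pos = aUsecsSincePlayStart + aPlayDurationUsecs - aBasePositionUsecs;
  // ...then re-applied at the current playback rate.
  return aBasePositionUsecs +
         static_cast<int64_t>(pos * aPlaybackRate) + aStartTimeUsecs;
}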
  2367 int64_t MediaDecoderStateMachine::GetClock()
  2369   AssertCurrentThreadInMonitor();
  2371   // Determine the clock time. If we've got audio, and we've not reached
  2372   // the end of the audio, use the audio clock. However if we've finished
  2373   // audio, or don't have audio, use the system clock. If our output is being
  2374   // fed to a MediaStream, use that stream as the source of the clock.
  2375   int64_t clock_time = -1;
  2376   DecodedStreamData* stream = mDecoder->GetDecodedStream();
  2377   if (!IsPlaying()) {
  2378     clock_time = mPlayDuration + mStartTime;
  2379   } else if (stream) {
  2380     clock_time = GetCurrentTimeViaMediaStreamSync();
  2381   } else {
  2382     int64_t audio_time = GetAudioClock();
  2383     if (HasAudio() && !mAudioCompleted && audio_time != -1) {
  2384       clock_time = audio_time;
  2385       // Resync against the audio clock, while we're trusting the
  2386       // audio clock. This ensures no "drift", particularly on Linux.
  2387       mPlayDuration = clock_time - mStartTime;
  2388       mPlayStartTime = TimeStamp::Now();
  2389     } else {
  2390       // Audio is disabled on this system. Sync to the system clock.
  2391       clock_time = GetVideoStreamPosition();
  2392       // Ensure the clock can never go backwards.
  2393       NS_ASSERTION(mCurrentFrameTime <= clock_time || mPlaybackRate <= 0,
  2394           "Clock should go forwards if the playback rate is > 0.");
  2397   return clock_time;
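// A minimal sketch of the clock-source selection in GetClock(): prefer the
// audio clock while audio is active and valid, otherwise fall back to the
// rate-scaled system/video clock. A value of -1 means "no audio clock
// available". SketchSelectClock and its parameters are hypothetical.
static int64_t SketchSelectClock(bool aHasAudio, bool aAudioCompleted,
                                 int64_t aAudioClockUsecs,
                                 int64_t aVideoStreamPositionUsecs)
{
  if (aHasAudio && !aAudioCompleted && aAudioClockUsecs != -1) {
    return aAudioClockUsecs;
  }
  return aVideoStreamPositionUsecs;
}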
  2400 void MediaDecoderStateMachine::AdvanceFrame()
  2402   NS_ASSERTION(OnStateMachineThread(), "Should be on state machine thread.");
  2403   AssertCurrentThreadInMonitor();
  2404   NS_ASSERTION(!HasAudio() || mAudioStartTime != -1,
  2405                "Should know audio start time if we have audio.");
  2407   if (mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING) {
  2408     return;
  2411   // If playbackRate is 0.0, we should stop progress, but not enter the
  2412   // paused state, per spec.
  2413   if (mPlaybackRate == 0.0) {
  2414     return;
  2417   int64_t clock_time = GetClock();
  2418   // Skip frames up to the frame at the playback position, and figure out
  2419   // the time remaining until it's time to display the next frame.
  2420   int64_t remainingTime = AUDIO_DURATION_USECS;
  2421   NS_ASSERTION(clock_time >= mStartTime, "Should have positive clock time.");
  2422   nsAutoPtr<VideoData> currentFrame;
  2423 #ifdef PR_LOGGING
  2424   int32_t droppedFrames = 0;
  2425 #endif
  2426   if (VideoQueue().GetSize() > 0) {
  2427     VideoData* frame = VideoQueue().PeekFront();
  2428     while (mRealTime || clock_time >= frame->mTime) {
  2429       mVideoFrameEndTime = frame->GetEndTime();
  2430       currentFrame = frame;
  2431 #ifdef PR_LOGGING
  2432       VERBOSE_LOG("discarding video frame %lld", frame->mTime);
  2433       if (droppedFrames++) {
  2434         VERBOSE_LOG("discarding video frame %lld (%d so far)", frame->mTime, droppedFrames-1);
  2436 #endif
  2437       VideoQueue().PopFront();
  2438       // Notify the decode thread that the video queue's buffers may have
  2439       // freed up space for more frames.
  2440       mDecoder->GetReentrantMonitor().NotifyAll();
  2441       mDecoder->UpdatePlaybackOffset(frame->mOffset);
  2442       if (VideoQueue().GetSize() == 0)
  2443         break;
  2444       frame = VideoQueue().PeekFront();
  2446     // The current frame has already been presented; wait until it's time
  2447     // to present the next frame.
  2448     if (frame && !currentFrame) {
  2449       int64_t now = IsPlaying() ? clock_time : mPlayDuration;
  2451       remainingTime = frame->mTime - now;
  2455   // Check to see if we don't have enough data to play up to the next frame.
  2456   // If we don't, switch to buffering mode.
  2457   MediaResource* resource = mDecoder->GetResource();
  2458   if (mState == DECODER_STATE_DECODING &&
  2459       mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
  2460       HasLowDecodedData(remainingTime + EXHAUSTED_DATA_MARGIN_USECS) &&
  2461       !mDecoder->IsDataCachedToEndOfResource() &&
  2462       !resource->IsSuspended()) {
  2463     if (JustExitedQuickBuffering() || HasLowUndecodedData()) {
  2464       if (currentFrame) {
  2465         VideoQueue().PushFront(currentFrame.forget());
  2467       StartBuffering();
  2468       // Don't go straight back to the state machine loop since that might
  2469       // cause us to start decoding again and we could flip-flop between
  2470       // decoding and quick-buffering.
  2471       ScheduleStateMachine(USECS_PER_S);
  2472       return;
  2476   // We've got enough data to keep playing until at least the next frame.
  2477   // Start playing now if need be.
  2478   if (!IsPlaying() && ((mFragmentEndTime >= 0 && clock_time < mFragmentEndTime) || mFragmentEndTime < 0)) {
  2479     StartPlayback();
  2482   if (currentFrame) {
  2483     // Decode one frame and display it.
  2484     TimeStamp presTime = mPlayStartTime - UsecsToDuration(mPlayDuration) +
  2485                           UsecsToDuration(currentFrame->mTime - mStartTime);
  2486     NS_ASSERTION(currentFrame->mTime >= mStartTime, "Should have positive frame time");
  2488       ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  2489       // If we have video, we want to increment the clock in steps of the frame
  2490       // duration.
  2491       RenderVideoFrame(currentFrame, presTime);
  2493     // If we're no longer playing after dropping and reacquiring the lock,
  2494     // playback must've been stopped on the decode thread (by a seek, for
  2495     // example).  In that case, the current frame is probably out of date.
  2496     if (!IsPlaying()) {
  2497       ScheduleStateMachine();
  2498       return;
  2500     MediaDecoder::FrameStatistics& frameStats = mDecoder->GetFrameStatistics();
  2501     frameStats.NotifyPresentedFrame();
  2502     remainingTime = currentFrame->GetEndTime() - clock_time;
  2503     currentFrame = nullptr;
  2506   // Cap the current time to the larger of the audio and video end time.
  2507   // This ensures that if we're running off the system clock, we don't
  2508   // advance the clock to after the media end time.
  2509   if (mVideoFrameEndTime != -1 || mAudioEndTime != -1) {
  2510     // These will not be -1 if we've displayed a video frame or played an audio frame.
  2511     clock_time = std::min(clock_time, std::max(mVideoFrameEndTime, mAudioEndTime));
  2512     if (clock_time > GetMediaTime()) {
  2513       // Only update the playback position if the clock time is greater
  2514       // than the previous playback position. The audio clock can
  2515     // sometimes report a time less than it previously reported in
  2516     // some situations, and we need to handle that gracefully.
  2517       UpdatePlaybackPosition(clock_time);
  2521   // If the number of audio/video frames queued has changed, either by
  2522   // this function popping and playing a video frame, or by the audio
  2523   // thread popping and playing an audio frame, we may need to update our
  2524   // ready state. Post an update to do so.
  2525   UpdateReadyState();
  2527   ScheduleStateMachine(remainingTime);
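// A standalone sketch of the frame-selection loop at the top of AdvanceFrame():
// given queued frame start times (sorted, in microseconds) and the current
// clock, every frame whose start time has already passed is skipped, the last
// such frame is the one to display, and the return value is the time remaining
// until the next frame is due. aFrameTimes/aFrameCount are hypothetical
// stand-ins for VideoQueue(); this is a sketch, not the real scheduling code.
static int64_t SketchTimeUntilNextFrame(const int64_t* aFrameTimes,
                                        uint32_t aFrameCount,
                                        int64_t aClockTimeUsecs,
                                        int64_t aDefaultRemainingUsecs)
{
  uint32_t next = 0;
  while (next < aFrameCount && aFrameTimes[next] <= aClockTimeUsecs) {
    // These frames are at or behind the clock; AdvanceFrame() pops them and
    // either renders the last one or counts the others as dropped.
    ++next;
  }
  if (next < aFrameCount) {
    return aFrameTimes[next] - aClockTimeUsecs;
  }
  return aDefaultRemainingUsecs;
}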
  2530 void MediaDecoderStateMachine::Wait(int64_t aUsecs) {
  2531   NS_ASSERTION(OnAudioThread(), "Only call on the audio thread");
  2532   AssertCurrentThreadInMonitor();
  2533   TimeStamp end = TimeStamp::Now() + UsecsToDuration(std::max<int64_t>(USECS_PER_MS, aUsecs));
  2534   TimeStamp now;
  2535   while ((now = TimeStamp::Now()) < end &&
  2536          mState != DECODER_STATE_SHUTDOWN &&
  2537          mState != DECODER_STATE_SEEKING &&
  2538          !mStopAudioThread &&
  2539          IsPlaying())
  2541     int64_t ms = static_cast<int64_t>(NS_round((end - now).ToSeconds() * 1000));
  2542     if (ms == 0 || ms > UINT32_MAX) {
  2543       break;
  2545     mDecoder->GetReentrantMonitor().Wait(PR_MillisecondsToInterval(static_cast<uint32_t>(ms)));
  2549 VideoData* MediaDecoderStateMachine::FindStartTime()
  2551   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  2552   AssertCurrentThreadInMonitor();
  2553   int64_t startTime = 0;
  2554   mStartTime = 0;
  2555   VideoData* v = nullptr;
  2557     ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  2558     v = mReader->FindStartTime(startTime);
  2560   if (startTime != 0) {
  2561     mStartTime = startTime;
  2562     if (mGotDurationFromMetaData) {
  2563       NS_ASSERTION(mEndTime != -1,
  2564                    "We should have mEndTime as supplied duration here");
  2565       // We were given a duration via a Content-Duration HTTP header.
  2566       // Adjust mEndTime so that mEndTime-mStartTime matches the specified
  2567       // duration.
  2568       mEndTime = mStartTime + mEndTime;
  2571   // Set the audio start time to be start of media. If this lies before the
  2572   // first actual audio frame we have, we'll inject silence during playback
  2573   // to ensure the audio starts at the correct time.
  2574   mAudioStartTime = mStartTime;
  2575   DECODER_LOG(PR_LOG_DEBUG, "Media start time is %lld", mStartTime);
  2576   return v;
  2579 void MediaDecoderStateMachine::UpdateReadyState() {
  2580   AssertCurrentThreadInMonitor();
  2582   MediaDecoderOwner::NextFrameStatus nextFrameStatus = GetNextFrameStatus();
  2583   if (nextFrameStatus == mLastFrameStatus) {
  2584     return;
  2586   mLastFrameStatus = nextFrameStatus;
  2588   /* This is a bit tricky. MediaDecoder::UpdateReadyStateForData will run on
  2589    * the main thread and re-evaluate GetNextFrameStatus there, passing it to
  2590    * HTMLMediaElement::UpdateReadyStateForData. It doesn't use the value of
  2591    * GetNextFrameStatus we computed here, because what we're computing here
  2592    * could be stale by the time MediaDecoder::UpdateReadyStateForData runs.
  2593    * We only compute GetNextFrameStatus here to avoid posting runnables to the main
  2594    * thread unnecessarily.
  2595    */
  2596   nsCOMPtr<nsIRunnable> event;
  2597   event = NS_NewRunnableMethod(mDecoder, &MediaDecoder::UpdateReadyStateForData);
  2598   NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
  2601 bool MediaDecoderStateMachine::JustExitedQuickBuffering()
  2603   return !mDecodeStartTime.IsNull() &&
  2604     mQuickBuffering &&
  2605     (TimeStamp::Now() - mDecodeStartTime) < TimeDuration::FromMicroseconds(QUICK_BUFFER_THRESHOLD_USECS);
  2608 void MediaDecoderStateMachine::StartBuffering()
  2610   AssertCurrentThreadInMonitor();
  2612   if (mState != DECODER_STATE_DECODING) {
  2613     // We only move into BUFFERING state if we're actually decoding.
  2614     // If we're currently doing something else, we don't need to buffer,
  2615     // and more importantly, we shouldn't overwrite mState to interrupt
  2616     // the current operation, as that could leave us in an inconsistent
  2617     // state!
  2618     return;
  2621   if (IsPlaying()) {
  2622     StopPlayback();
  2625   TimeDuration decodeDuration = TimeStamp::Now() - mDecodeStartTime;
  2626   // Go into quick buffering mode provided we've not just left buffering using
  2627   // a "quick exit". This stops us flip-flopping between playing and buffering
  2628   // when the download speed is similar to the decode speed.
  2629   mQuickBuffering =
  2630     !JustExitedQuickBuffering() &&
  2631     decodeDuration < UsecsToDuration(QUICK_BUFFER_THRESHOLD_USECS);
  2632   mBufferingStart = TimeStamp::Now();
  2634   // We need to tell the element that buffering has started.
  2635   // We can't just directly send an asynchronous runnable that
  2636   // eventually fires the "waiting" event. The problem is that
  2637   // there might be pending main-thread events, such as "data
  2638   // received" notifications, that mean we're not actually still
  2639   // buffering by the time this runnable executes. So instead
  2640   // we just trigger UpdateReadyStateForData; when it runs, it
  2641   // will check the current state and decide whether to tell
  2642   // the element we're buffering or not.
  2643   UpdateReadyState();
  2644   mState = DECODER_STATE_BUFFERING;
  2645   DECODER_LOG(PR_LOG_DEBUG, "Changed state from DECODING to BUFFERING, decoded for %.3lfs",
  2646               decodeDuration.ToSeconds());
  2647 #ifdef PR_LOGGING
  2648   MediaDecoder::Statistics stats = mDecoder->GetStatistics();
  2649   DECODER_LOG(PR_LOG_DEBUG, "Playback rate: %.1lfKB/s%s download rate: %.1lfKB/s%s",
  2650               stats.mPlaybackRate/1024, stats.mPlaybackRateReliable ? "" : " (unreliable)",
  2651               stats.mDownloadRate/1024, stats.mDownloadRateReliable ? "" : " (unreliable)");
  2652 #endif
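// A minimal sketch of the quick-buffering decision made in StartBuffering()
// together with JustExitedQuickBuffering(), reduced to plain microsecond
// arithmetic: only enter quick buffering if we did not just leave it via a
// quick exit, and if we ran out of data shortly after the last decode started.
// The threshold parameter stands in for QUICK_BUFFER_THRESHOLD_USECS;
// SketchShouldQuickBuffer is hypothetical.
static bool SketchShouldQuickBuffer(int64_t aUsecsSinceDecodeStart,
                                    bool aJustExitedQuickBuffering,
                                    int64_t aQuickBufferThresholdUsecs)
{
  return !aJustExitedQuickBuffering &&
         aUsecsSinceDecodeStart < aQuickBufferThresholdUsecs;
}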
  2655 nsresult MediaDecoderStateMachine::GetBuffered(dom::TimeRanges* aBuffered) {
  2656   MediaResource* resource = mDecoder->GetResource();
  2657   NS_ENSURE_TRUE(resource, NS_ERROR_FAILURE);
  2658   resource->Pin();
  2659   nsresult res = mReader->GetBuffered(aBuffered, mStartTime);
  2660   resource->Unpin();
  2661   return res;
  2664 nsresult MediaDecoderStateMachine::CallRunStateMachine()
  2666   AssertCurrentThreadInMonitor();
  2667   NS_ASSERTION(OnStateMachineThread(), "Should be on state machine thread.");
  2669   // If audio is being captured, stop the audio thread if it's running
  2670   if (mAudioCaptured) {
  2671     StopAudioThread();
  2674   MOZ_ASSERT(!mInRunningStateMachine, "State machine cycles must run in sequence!");
  2675   mTimeout = TimeStamp();
  2676   mInRunningStateMachine = true;
  2677   nsresult res = RunStateMachine();
  2678   mInRunningStateMachine = false;
  2679   return res;
  2682 nsresult MediaDecoderStateMachine::TimeoutExpired(int aTimerId)
  2684   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  2685   NS_ASSERTION(OnStateMachineThread(), "Must be on state machine thread");
  2686   mTimer->Cancel();
  2687   if (mTimerId == aTimerId) {
  2688     return CallRunStateMachine();
  2689   } else {
  2690     return NS_OK;
  2694 void MediaDecoderStateMachine::ScheduleStateMachineWithLockAndWakeDecoder() {
  2695   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  2696   DispatchAudioDecodeTaskIfNeeded();
  2697   DispatchVideoDecodeTaskIfNeeded();
  2700 class TimerEvent : public nsITimerCallback, public nsRunnable {
  2701   NS_DECL_THREADSAFE_ISUPPORTS
  2702 public:
  2703   TimerEvent(MediaDecoderStateMachine* aStateMachine, int aTimerId)
  2704     : mStateMachine(aStateMachine), mTimerId(aTimerId) {}
  2706   NS_IMETHOD Run() MOZ_OVERRIDE {
  2707     return mStateMachine->TimeoutExpired(mTimerId);
  2710   NS_IMETHOD Notify(nsITimer* aTimer) {
  2711     return mStateMachine->TimeoutExpired(mTimerId);
  2713 private:
  2714   const nsRefPtr<MediaDecoderStateMachine> mStateMachine;
  2715   int mTimerId;
  2716 };
  2718 NS_IMPL_ISUPPORTS(TimerEvent, nsITimerCallback, nsIRunnable);
  2720 nsresult MediaDecoderStateMachine::ScheduleStateMachine(int64_t aUsecs) {
  2721   AssertCurrentThreadInMonitor();
  2722   NS_ABORT_IF_FALSE(GetStateMachineThread(),
  2723     "Must have a state machine thread to schedule");
  2725   if (mState == DECODER_STATE_SHUTDOWN) {
  2726     return NS_ERROR_FAILURE;
  2728   aUsecs = std::max<int64_t>(aUsecs, 0);
  2730   TimeStamp timeout = TimeStamp::Now() + UsecsToDuration(aUsecs);
  2731   if (!mTimeout.IsNull() && timeout >= mTimeout) {
  2732     // We've already scheduled a timer set to expire at or before this time,
  2733     // or have an event dispatched to run the state machine.
  2734     return NS_OK;
  2737   uint32_t ms = static_cast<uint32_t>((aUsecs / USECS_PER_MS) & 0xFFFFFFFF);
  2738   if (mRealTime && ms > 40) {
  2739     ms = 40;
  2742   // Don't cancel the timer here, since this function can be called from
  2743   // different threads.
  2745   nsresult rv = NS_ERROR_FAILURE;
  2746   nsRefPtr<TimerEvent> event = new TimerEvent(this, mTimerId+1);
  2748   if (ms == 0) {
  2749     // Dispatch a runnable directly to the state machine thread when the
  2750     // delay is 0. This has less latency than dispatching a runnable that
  2751     // then schedules a zero-delay timer on the state machine thread.
  2752     rv = GetStateMachineThread()->Dispatch(event, NS_DISPATCH_NORMAL);
  2753   } else if (OnStateMachineThread()) {
  2754     rv = mTimer->InitWithCallback(event, ms, nsITimer::TYPE_ONE_SHOT);
  2755   } else {
  2756     MOZ_ASSERT(false, "non-zero delay timer should be only scheduled in state machine thread");
  2759   if (NS_SUCCEEDED(rv)) {
  2760     mTimeout = timeout;
  2761     ++mTimerId;
  2762   } else {
  2763     NS_WARNING("Failed to schedule state machine");
  2766   return rv;
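// A standalone sketch of the delay handling in ScheduleStateMachine(): the
// requested delay is clamped to be non-negative, converted to whole
// milliseconds, and capped at 40ms for real-time sources so the state machine
// keeps ticking. SketchStateMachineDelayMs is a hypothetical helper.
static uint32_t SketchStateMachineDelayMs(int64_t aRequestedUsecs,
                                          bool aRealTime)
{
  const int64_t usecs = std::max<int64_t>(aRequestedUsecs, 0);
  uint32_t ms = static_cast<uint32_t>((usecs / 1000) & 0xFFFFFFFF);
  if (aRealTime && ms > 40) {
    ms = 40;
  }
  // A result of 0 means "run now": the code above dispatches a runnable
  // directly instead of arming a zero-delay timer.
  return ms;
}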
  2769 bool MediaDecoderStateMachine::OnDecodeThread() const
  2771   return mDecodeTaskQueue->IsCurrentThreadIn();
  2774 bool MediaDecoderStateMachine::OnStateMachineThread() const
  2776   bool rv = false;
  2777   mStateMachineThreadPool->IsOnCurrentThread(&rv);
  2778   return rv;
  2781 nsIEventTarget* MediaDecoderStateMachine::GetStateMachineThread()
  2783   return mStateMachineThreadPool->GetEventTarget();
  2786 void MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate)
  2788   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  2789   NS_ASSERTION(aPlaybackRate != 0,
  2790       "PlaybackRate == 0 should be handled before this function.");
  2791   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  2793   if (mPlaybackRate == aPlaybackRate) {
  2794     return;
  2797   // Get position of the last time we changed the rate.
  2798   if (!HasAudio()) {
  2799     // mBasePosition is a position in the video stream, not an absolute time.
  2800     if (mState == DECODER_STATE_SEEKING) {
  2801       mBasePosition = mSeekTarget.mTime - mStartTime;
  2802     } else {
  2803       mBasePosition = GetVideoStreamPosition();
  2805     mPlayDuration = mBasePosition;
  2806     mResetPlayStartTime = true;
  2807     mPlayStartTime = TimeStamp::Now();
  2810   mPlaybackRate = aPlaybackRate;
  2813 void MediaDecoderStateMachine::SetPreservesPitch(bool aPreservesPitch)
  2815   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  2816   ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  2818   mPreservesPitch = aPreservesPitch;
  2821 void
  2822 MediaDecoderStateMachine::SetMinimizePrerollUntilPlaybackStarts()
  2824   AssertCurrentThreadInMonitor();
  2825   mMinimizePreroll = true;
  2828 bool MediaDecoderStateMachine::IsShutdown()
  2830   AssertCurrentThreadInMonitor();
  2831   return GetState() == DECODER_STATE_SHUTDOWN;
  2834 void MediaDecoderStateMachine::QueueMetadata(int64_t aPublishTime,
  2835                                              int aChannels,
  2836                                              int aRate,
  2837                                              bool aHasAudio,
  2838                                              bool aHasVideo,
  2839                                              MetadataTags* aTags)
  2841   NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  2842   AssertCurrentThreadInMonitor();
  2843   TimedMetadata* metadata = new TimedMetadata;
  2844   metadata->mPublishTime = aPublishTime;
  2845   metadata->mChannels = aChannels;
  2846   metadata->mRate = aRate;
  2847   metadata->mHasAudio = aHasAudio;
  2848   metadata->mHasVideo = aHasVideo;
  2849   metadata->mTags = aTags;
  2850   mMetadataManager.QueueMetadata(metadata);
  2853 } // namespace mozilla
  2855 // avoid redefined macro in unified build
  2856 #undef DECODER_LOG
  2857 #undef VERBOSE_LOG
