content/media/MediaDecoderStateMachine.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/content/media/MediaDecoderStateMachine.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,2857 @@
     1.4 +/* vim:set ts=2 sw=2 sts=2 et cindent: */
     1.5 +/* This Source Code Form is subject to the terms of the Mozilla Public
     1.6 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.7 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.8 +
     1.9 +#ifdef XP_WIN
    1.10 +// Include Windows headers required for enabling high precision timers.
    1.11 +#include "windows.h"
    1.12 +#include "mmsystem.h"
    1.13 +#endif
    1.14 +
    1.15 +#include "mozilla/DebugOnly.h"
    1.16 +#include <stdint.h>
    1.17 +
    1.18 +#include "MediaDecoderStateMachine.h"
    1.19 +#include "AudioStream.h"
    1.20 +#include "nsTArray.h"
    1.21 +#include "MediaDecoder.h"
    1.22 +#include "MediaDecoderReader.h"
    1.23 +#include "mozilla/mozalloc.h"
    1.24 +#include "VideoUtils.h"
    1.25 +#include "mozilla/dom/TimeRanges.h"
    1.26 +#include "nsDeque.h"
    1.27 +#include "AudioSegment.h"
    1.28 +#include "VideoSegment.h"
    1.29 +#include "ImageContainer.h"
    1.30 +#include "nsComponentManagerUtils.h"
    1.31 +#include "nsITimer.h"
    1.32 +#include "nsContentUtils.h"
    1.33 +#include "MediaShutdownManager.h"
    1.34 +#include "SharedThreadPool.h"
    1.35 +#include "MediaTaskQueue.h"
    1.36 +#include "nsIEventTarget.h"
    1.37 +#include "prenv.h"
    1.38 +#include "mozilla/Preferences.h"
    1.39 +#include "gfx2DGlue.h"
    1.40 +
    1.41 +#include <algorithm>
    1.42 +
    1.43 +namespace mozilla {
    1.44 +
    1.45 +using namespace mozilla::layers;
    1.46 +using namespace mozilla::dom;
    1.47 +using namespace mozilla::gfx;
    1.48 +
     1.49 +// Avoid macro redefinition in unified builds.
    1.50 +#undef DECODER_LOG
    1.51 +#undef VERBOSE_LOG
    1.52 +
    1.53 +#ifdef PR_LOGGING
    1.54 +extern PRLogModuleInfo* gMediaDecoderLog;
    1.55 +#define DECODER_LOG(type, msg, ...) \
    1.56 +  PR_LOG(gMediaDecoderLog, type, ("Decoder=%p " msg, mDecoder.get(), ##__VA_ARGS__))
    1.57 +#define VERBOSE_LOG(msg, ...)                          \
    1.58 +    PR_BEGIN_MACRO                                     \
    1.59 +      if (!PR_GetEnv("MOZ_QUIET")) {                   \
    1.60 +        DECODER_LOG(PR_LOG_DEBUG, msg, ##__VA_ARGS__); \
    1.61 +      }                                                \
    1.62 +    PR_END_MACRO
    1.63 +#else
    1.64 +#define DECODER_LOG(type, msg, ...)
    1.65 +#define VERBOSE_LOG(msg, ...)
    1.66 +#endif
    1.67 +
     1.68 +// GetCurrentTime is defined in winbase.h as a zero-argument macro forwarding to
     1.69 +// GetTickCount() and conflicts with the MediaDecoderStateMachine::GetCurrentTime
     1.70 +// implementation.  With unified builds, putting this #undef in headers is not enough.
    1.71 +#ifdef GetCurrentTime
    1.72 +#undef GetCurrentTime
    1.73 +#endif
    1.74 +
    1.75 +// Wait this number of seconds when buffering, then leave and play
    1.76 +// as best as we can if the required amount of data hasn't been
    1.77 +// retrieved.
    1.78 +static const uint32_t BUFFERING_WAIT_S = 30;
    1.79 +
     1.80 +// If the audio queue has less than this many usecs of decoded audio, we won't
     1.81 +// risk trying to decode the video; we'll skip decoding video up to the next
     1.82 +// keyframe. We may increase this value for an individual decoder if we
     1.83 +// encounter video frames which take a long time to decode.
    1.84 +static const uint32_t LOW_AUDIO_USECS = 300000;
    1.85 +
    1.86 +// If more than this many usecs of decoded audio is queued, we'll hold off
    1.87 +// decoding more audio. If we increase the low audio threshold (see
    1.88 +// LOW_AUDIO_USECS above) we'll also increase this value to ensure it's not
    1.89 +// less than the low audio threshold.
    1.90 +const int64_t AMPLE_AUDIO_USECS = 1000000;
    1.91 +
    1.92 +// When we're only playing audio and we don't have a video stream, we divide
    1.93 +// AMPLE_AUDIO_USECS and LOW_AUDIO_USECS by the following value. This reduces
    1.94 +// the amount of decoded audio we buffer, reducing our memory usage. We only
    1.95 +// need to decode far ahead when we're decoding video using software decoding,
    1.96 +// as otherwise a long video decode could cause an audio underrun.
    1.97 +const int64_t NO_VIDEO_AMPLE_AUDIO_DIVISOR = 8;
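          +// For example (illustrative arithmetic only): with the values above, an
          +// audio-only stream buffers at most AMPLE_AUDIO_USECS / 8 = 125000 usecs
          +// (125 ms) of decoded audio, and its low-audio mark drops to
          +// LOW_AUDIO_USECS / 8 = 37500 usecs (37.5 ms).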
    1.98 +
    1.99 +// Maximum number of bytes we'll allocate and write at once to the audio
   1.100 +// hardware when the audio stream contains missing frames and we're
   1.101 +// writing silence in order to fill the gap. We limit our silence-writes
   1.102 +// to 32KB in order to avoid allocating an impossibly large chunk of
   1.103 +// memory if we encounter a large chunk of silence.
   1.104 +const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024;
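          +// As a rough sketch of the limit this imposes (assuming stereo output and
          +// 4-byte float samples, i.e. sizeof(AudioDataValue) == 4), a single silence
          +// write covers at most 32 * 1024 / (2 * 4) = 4096 frames; PlaySilence() below
          +// performs this same division per call.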
   1.105 +
    1.106 +// If we have fewer than LOW_VIDEO_FRAMES decoded frames, and
    1.107 +// we're not "prerolling video", we'll skip the video decode up to the next
    1.108 +// keyframe that is at or after the current playback position.
   1.109 +static const uint32_t LOW_VIDEO_FRAMES = 1;
   1.110 +
   1.111 +// Arbitrary "frame duration" when playing only audio.
   1.112 +static const int AUDIO_DURATION_USECS = 40000;
   1.113 +
   1.114 +// If we increase our "low audio threshold" (see LOW_AUDIO_USECS above), we
   1.115 +// use this as a factor in all our calculations. Increasing this will cause
   1.116 +// us to be more likely to increase our low audio threshold, and to
   1.117 +// increase it by more.
   1.118 +static const int THRESHOLD_FACTOR = 2;
   1.119 +
   1.120 +// If we have less than this much undecoded data available, we'll consider
   1.121 +// ourselves to be running low on undecoded data. We determine how much
   1.122 +// undecoded data we have remaining using the reader's GetBuffered()
   1.123 +// implementation.
   1.124 +static const int64_t LOW_DATA_THRESHOLD_USECS = 5000000;
   1.125 +
   1.126 +// LOW_DATA_THRESHOLD_USECS needs to be greater than AMPLE_AUDIO_USECS, otherwise
   1.127 +// the skip-to-keyframe logic can activate when we're running low on data.
   1.128 +static_assert(LOW_DATA_THRESHOLD_USECS > AMPLE_AUDIO_USECS,
   1.129 +              "LOW_DATA_THRESHOLD_USECS is too small");
   1.130 +
    1.131 +// Amount of excess data, in usecs, to factor into the "should we buffer" calculation.
   1.132 +static const uint32_t EXHAUSTED_DATA_MARGIN_USECS = 60000;
   1.133 +
    1.134 +// If we enter buffering within QUICK_BUFFER_THRESHOLD_USECS microseconds of
    1.135 +// starting decoding, we'll enter "quick buffering" mode, which exits a lot sooner
    1.136 +// than normal buffering mode. This exists so that if the decode-ahead exhausts the
    1.137 +// downloaded data while decode/playback is just starting up (for example
    1.138 +// after a seek while the media is still playing, or when playing a media
    1.139 +// as soon as its load has started), we won't necessarily stop for 30s and wait
    1.140 +// for buffering. We may actually be able to play back in this case, so exit
    1.141 +// buffering early and try to play. If it turns out we can't play, we'll fall
    1.142 +// back to buffering normally.
   1.143 +static const uint32_t QUICK_BUFFER_THRESHOLD_USECS = 2000000;
   1.144 +
   1.145 +// If we're quick buffering, we'll remain in buffering mode while we have less than
   1.146 +// QUICK_BUFFERING_LOW_DATA_USECS of decoded data available.
   1.147 +static const uint32_t QUICK_BUFFERING_LOW_DATA_USECS = 1000000;
   1.148 +
   1.149 +// If QUICK_BUFFERING_LOW_DATA_USECS is > AMPLE_AUDIO_USECS, we won't exit
   1.150 +// quick buffering in a timely fashion, as the decode pauses when it
   1.151 +// reaches AMPLE_AUDIO_USECS decoded data, and thus we'll never reach
   1.152 +// QUICK_BUFFERING_LOW_DATA_USECS.
   1.153 +static_assert(QUICK_BUFFERING_LOW_DATA_USECS <= AMPLE_AUDIO_USECS,
   1.154 +              "QUICK_BUFFERING_LOW_DATA_USECS is too large");
   1.155 +
   1.156 +// This value has been chosen empirically.
   1.157 +static const uint32_t AUDIOSTREAM_MIN_WRITE_BEFORE_START_USECS = 200000;
   1.158 +
    1.159 +// The amount of instability we tolerate in calls to
   1.160 +// MediaDecoderStateMachine::UpdateEstimatedDuration(); changes of duration
   1.161 +// less than this are ignored, as they're assumed to be the result of
   1.162 +// instability in the duration estimation.
   1.163 +static const int64_t ESTIMATED_DURATION_FUZZ_FACTOR_USECS = USECS_PER_S / 2;
   1.164 +
   1.165 +static TimeDuration UsecsToDuration(int64_t aUsecs) {
   1.166 +  return TimeDuration::FromMilliseconds(static_cast<double>(aUsecs) / USECS_PER_MS);
   1.167 +}
   1.168 +
   1.169 +static int64_t DurationToUsecs(TimeDuration aDuration) {
   1.170 +  return static_cast<int64_t>(aDuration.ToSeconds() * USECS_PER_S);
   1.171 +}
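          +// Quick sanity check of the helpers above (illustrative values only): a
          +// TimeDuration of 1.5 seconds gives DurationToUsecs() == 1500000, and
          +// UsecsToDuration(40000) is a 40 ms TimeDuration.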
   1.172 +
   1.173 +MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
   1.174 +                                                   MediaDecoderReader* aReader,
   1.175 +                                                   bool aRealTime) :
   1.176 +  mDecoder(aDecoder),
   1.177 +  mState(DECODER_STATE_DECODING_METADATA),
   1.178 +  mInRunningStateMachine(false),
   1.179 +  mSyncPointInMediaStream(-1),
   1.180 +  mSyncPointInDecodedStream(-1),
   1.181 +  mResetPlayStartTime(false),
   1.182 +  mPlayDuration(0),
   1.183 +  mStartTime(-1),
   1.184 +  mEndTime(-1),
   1.185 +  mFragmentEndTime(-1),
   1.186 +  mReader(aReader),
   1.187 +  mCurrentFrameTime(0),
   1.188 +  mAudioStartTime(-1),
   1.189 +  mAudioEndTime(-1),
   1.190 +  mVideoFrameEndTime(-1),
   1.191 +  mVolume(1.0),
   1.192 +  mPlaybackRate(1.0),
   1.193 +  mPreservesPitch(true),
   1.194 +  mBasePosition(0),
   1.195 +  mAmpleVideoFrames(2),
   1.196 +  mLowAudioThresholdUsecs(LOW_AUDIO_USECS),
   1.197 +  mAmpleAudioThresholdUsecs(AMPLE_AUDIO_USECS),
   1.198 +  mDispatchedAudioDecodeTask(false),
   1.199 +  mDispatchedVideoDecodeTask(false),
   1.200 +  mIsReaderIdle(false),
   1.201 +  mAudioCaptured(false),
   1.202 +  mTransportSeekable(true),
   1.203 +  mMediaSeekable(true),
   1.204 +  mPositionChangeQueued(false),
   1.205 +  mAudioCompleted(false),
   1.206 +  mGotDurationFromMetaData(false),
   1.207 +  mDispatchedEventToDecode(false),
   1.208 +  mStopAudioThread(true),
   1.209 +  mQuickBuffering(false),
   1.210 +  mMinimizePreroll(false),
   1.211 +  mDecodeThreadWaiting(false),
   1.212 +  mRealTime(aRealTime),
   1.213 +  mLastFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED),
   1.214 +  mTimerId(0)
   1.215 +{
   1.216 +  MOZ_COUNT_CTOR(MediaDecoderStateMachine);
   1.217 +  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
   1.218 +
   1.219 +  // Only enable realtime mode when "media.realtime_decoder.enabled" is true.
   1.220 +  if (Preferences::GetBool("media.realtime_decoder.enabled", false) == false)
   1.221 +    mRealTime = false;
   1.222 +
   1.223 +  mAmpleVideoFrames =
   1.224 +    std::max<uint32_t>(Preferences::GetUint("media.video-queue.default-size", 10), 3);
   1.225 +
   1.226 +  mBufferingWait = mRealTime ? 0 : BUFFERING_WAIT_S;
   1.227 +  mLowDataThresholdUsecs = mRealTime ? 0 : LOW_DATA_THRESHOLD_USECS;
   1.228 +
   1.229 +  mVideoPrerollFrames = mRealTime ? 0 : mAmpleVideoFrames / 2;
   1.230 +  mAudioPrerollUsecs = mRealTime ? 0 : LOW_AUDIO_USECS * 2;
   1.231 +
   1.232 +#ifdef XP_WIN
   1.233 +  // Ensure high precision timers are enabled on Windows, otherwise the state
   1.234 +  // machine thread isn't woken up at reliable intervals to set the next frame,
   1.235 +  // and we drop frames while painting. Note that multiple calls to this
    1.236 +  // function per process are OK, provided each call is matched by a corresponding
   1.237 +  // timeEndPeriod() call.
   1.238 +  timeBeginPeriod(1);
   1.239 +#endif
   1.240 +}
   1.241 +
   1.242 +MediaDecoderStateMachine::~MediaDecoderStateMachine()
   1.243 +{
   1.244 +  MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread.");
   1.245 +  MOZ_COUNT_DTOR(MediaDecoderStateMachine);
   1.246 +  NS_ASSERTION(!mPendingWakeDecoder.get(),
   1.247 +               "WakeDecoder should have been revoked already");
   1.248 +
   1.249 +  MOZ_ASSERT(!mDecodeTaskQueue, "Should be released in SHUTDOWN");
    1.250 +  // No need to cancel the timer here because we've already done that in SHUTDOWN.
   1.251 +  MOZ_ASSERT(!mTimer, "Should be released in SHUTDOWN");
   1.252 +  mReader = nullptr;
   1.253 +
   1.254 +#ifdef XP_WIN
   1.255 +  timeEndPeriod(1);
   1.256 +#endif
   1.257 +}
   1.258 +
   1.259 +bool MediaDecoderStateMachine::HasFutureAudio() {
   1.260 +  AssertCurrentThreadInMonitor();
   1.261 +  NS_ASSERTION(HasAudio(), "Should only call HasFutureAudio() when we have audio");
   1.262 +  // We've got audio ready to play if:
   1.263 +  // 1. We've not completed playback of audio, and
   1.264 +  // 2. we either have more than the threshold of decoded audio available, or
   1.265 +  //    we've completely decoded all audio (but not finished playing it yet
   1.266 +  //    as per 1).
   1.267 +  return !mAudioCompleted &&
   1.268 +         (AudioDecodedUsecs() > LOW_AUDIO_USECS * mPlaybackRate || AudioQueue().IsFinished());
   1.269 +}
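          +// As a concrete illustration (assuming 1x playback), audio counts as "future
          +// audio" once more than LOW_AUDIO_USECS (300 ms) of it is decoded and waiting,
          +// or once the whole audio stream has been decoded even if less remains queued.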
   1.270 +
   1.271 +bool MediaDecoderStateMachine::HaveNextFrameData() {
   1.272 +  AssertCurrentThreadInMonitor();
   1.273 +  return (!HasAudio() || HasFutureAudio()) &&
   1.274 +         (!HasVideo() || VideoQueue().GetSize() > 0);
   1.275 +}
   1.276 +
   1.277 +int64_t MediaDecoderStateMachine::GetDecodedAudioDuration() {
   1.278 +  NS_ASSERTION(OnDecodeThread() || OnStateMachineThread(),
   1.279 +               "Should be on decode thread or state machine thread");
   1.280 +  AssertCurrentThreadInMonitor();
   1.281 +  int64_t audioDecoded = AudioQueue().Duration();
   1.282 +  if (mAudioEndTime != -1) {
   1.283 +    audioDecoded += mAudioEndTime - GetMediaTime();
   1.284 +  }
   1.285 +  return audioDecoded;
   1.286 +}
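          +// Illustrative example of the sum above (hypothetical numbers): with 200000
          +// usecs of decoded audio still queued, mAudioEndTime at 5300000 and
          +// GetMediaTime() at 5250000, this reports 200000 + 50000 = 250000 usecs,
          +// i.e. queued audio plus audio pushed to the hardware but not yet played.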
   1.287 +
   1.288 +void MediaDecoderStateMachine::SendStreamAudio(AudioData* aAudio,
   1.289 +                                               DecodedStreamData* aStream,
   1.290 +                                               AudioSegment* aOutput)
   1.291 +{
   1.292 +  NS_ASSERTION(OnDecodeThread() ||
   1.293 +               OnStateMachineThread(), "Should be on decode thread or state machine thread");
   1.294 +  AssertCurrentThreadInMonitor();
   1.295 +
   1.296 +  if (aAudio->mTime <= aStream->mLastAudioPacketTime) {
    1.297 +    // Ignore a packet that we've already processed.
   1.298 +    return;
   1.299 +  }
   1.300 +  aStream->mLastAudioPacketTime = aAudio->mTime;
   1.301 +  aStream->mLastAudioPacketEndTime = aAudio->GetEndTime();
   1.302 +
   1.303 +  // This logic has to mimic AudioLoop closely to make sure we write
   1.304 +  // the exact same silences
   1.305 +  CheckedInt64 audioWrittenOffset = UsecsToFrames(mInfo.mAudio.mRate,
   1.306 +      aStream->mInitialTime + mStartTime) + aStream->mAudioFramesWritten;
   1.307 +  CheckedInt64 frameOffset = UsecsToFrames(mInfo.mAudio.mRate, aAudio->mTime);
   1.308 +  if (!audioWrittenOffset.isValid() || !frameOffset.isValid())
   1.309 +    return;
   1.310 +  if (audioWrittenOffset.value() < frameOffset.value()) {
   1.311 +    // Write silence to catch up
   1.312 +    VERBOSE_LOG("writing %d frames of silence to MediaStream",
   1.313 +                int32_t(frameOffset.value() - audioWrittenOffset.value()));
   1.314 +    AudioSegment silence;
   1.315 +    silence.InsertNullDataAtStart(frameOffset.value() - audioWrittenOffset.value());
   1.316 +    aStream->mAudioFramesWritten += silence.GetDuration();
   1.317 +    aOutput->AppendFrom(&silence);
   1.318 +  }
   1.319 +
   1.320 +  int64_t offset;
   1.321 +  if (aStream->mAudioFramesWritten == 0) {
   1.322 +    NS_ASSERTION(frameOffset.value() <= audioWrittenOffset.value(),
   1.323 +                 "Otherwise we'd have taken the write-silence path");
   1.324 +    // We're starting in the middle of a packet. Split the packet.
   1.325 +    offset = audioWrittenOffset.value() - frameOffset.value();
   1.326 +  } else {
   1.327 +    // Write the entire packet.
   1.328 +    offset = 0;
   1.329 +  }
   1.330 +
   1.331 +  if (offset >= aAudio->mFrames)
   1.332 +    return;
   1.333 +
   1.334 +  aAudio->EnsureAudioBuffer();
   1.335 +  nsRefPtr<SharedBuffer> buffer = aAudio->mAudioBuffer;
   1.336 +  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
   1.337 +  nsAutoTArray<const AudioDataValue*,2> channels;
   1.338 +  for (uint32_t i = 0; i < aAudio->mChannels; ++i) {
   1.339 +    channels.AppendElement(bufferData + i*aAudio->mFrames + offset);
   1.340 +  }
   1.341 +  aOutput->AppendFrames(buffer.forget(), channels, aAudio->mFrames);
   1.342 +  VERBOSE_LOG("writing %d frames of data to MediaStream for AudioData at %lld",
   1.343 +              aAudio->mFrames - int32_t(offset), aAudio->mTime);
   1.344 +  aStream->mAudioFramesWritten += aAudio->mFrames - int32_t(offset);
   1.345 +}
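          +// A sketch of how the offsets above interact (hypothetical 44100 Hz stream):
          +// if audioWrittenOffset is 44100 frames (1 s written) and the incoming packet
          +// starts at frameOffset 48510 (1.1 s), we first append 4410 frames of silence;
          +// conversely, if nothing has been written yet and frameOffset is behind
          +// audioWrittenOffset, we skip the first (audioWrittenOffset - frameOffset)
          +// frames of the packet so the stream picks it up mid-packet at the right time.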
   1.346 +
   1.347 +static void WriteVideoToMediaStream(layers::Image* aImage,
   1.348 +                                    int64_t aDuration,
   1.349 +                                    const IntSize& aIntrinsicSize,
   1.350 +                                    VideoSegment* aOutput)
   1.351 +{
   1.352 +  nsRefPtr<layers::Image> image = aImage;
   1.353 +  aOutput->AppendFrame(image.forget(), aDuration, aIntrinsicSize);
   1.354 +}
   1.355 +
   1.356 +static const TrackID TRACK_AUDIO = 1;
   1.357 +static const TrackID TRACK_VIDEO = 2;
   1.358 +static const TrackRate RATE_VIDEO = USECS_PER_S;
   1.359 +
   1.360 +void MediaDecoderStateMachine::SendStreamData()
   1.361 +{
   1.362 +  NS_ASSERTION(OnDecodeThread() ||
   1.363 +               OnStateMachineThread(), "Should be on decode thread or state machine thread");
   1.364 +  AssertCurrentThreadInMonitor();
   1.365 +
   1.366 +  DecodedStreamData* stream = mDecoder->GetDecodedStream();
   1.367 +  if (!stream)
   1.368 +    return;
   1.369 +
   1.370 +  if (mState == DECODER_STATE_DECODING_METADATA)
   1.371 +    return;
   1.372 +
   1.373 +  // If there's still an audio thread alive, then we can't send any stream
   1.374 +  // data yet since both SendStreamData and the audio thread want to be in
   1.375 +  // charge of popping the audio queue. We're waiting for the audio thread
   1.376 +  // to die before sending anything to our stream.
   1.377 +  if (mAudioThread)
   1.378 +    return;
   1.379 +
   1.380 +  int64_t minLastAudioPacketTime = INT64_MAX;
   1.381 +  bool finished =
   1.382 +      (!mInfo.HasAudio() || AudioQueue().IsFinished()) &&
   1.383 +      (!mInfo.HasVideo() || VideoQueue().IsFinished());
   1.384 +  if (mDecoder->IsSameOriginMedia()) {
   1.385 +    SourceMediaStream* mediaStream = stream->mStream;
   1.386 +    StreamTime endPosition = 0;
   1.387 +
   1.388 +    if (!stream->mStreamInitialized) {
   1.389 +      if (mInfo.HasAudio()) {
   1.390 +        AudioSegment* audio = new AudioSegment();
   1.391 +        mediaStream->AddTrack(TRACK_AUDIO, mInfo.mAudio.mRate, 0, audio);
   1.392 +        stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_AUDIO,
   1.393 +            GetStateMachineThread(), GetWakeDecoderRunnable());
   1.394 +      }
   1.395 +      if (mInfo.HasVideo()) {
   1.396 +        VideoSegment* video = new VideoSegment();
   1.397 +        mediaStream->AddTrack(TRACK_VIDEO, RATE_VIDEO, 0, video);
   1.398 +        stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_VIDEO,
   1.399 +            GetStateMachineThread(), GetWakeDecoderRunnable());
   1.400 +      }
   1.401 +      stream->mStreamInitialized = true;
   1.402 +    }
   1.403 +
   1.404 +    if (mInfo.HasAudio()) {
   1.405 +      nsAutoTArray<AudioData*,10> audio;
   1.406 +      // It's OK to hold references to the AudioData because while audio
   1.407 +      // is captured, only the decoder thread pops from the queue (see below).
   1.408 +      AudioQueue().GetElementsAfter(stream->mLastAudioPacketTime, &audio);
   1.409 +      AudioSegment output;
   1.410 +      for (uint32_t i = 0; i < audio.Length(); ++i) {
   1.411 +        SendStreamAudio(audio[i], stream, &output);
   1.412 +      }
   1.413 +      if (output.GetDuration() > 0) {
   1.414 +        mediaStream->AppendToTrack(TRACK_AUDIO, &output);
   1.415 +      }
   1.416 +      if (AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) {
   1.417 +        mediaStream->EndTrack(TRACK_AUDIO);
   1.418 +        stream->mHaveSentFinishAudio = true;
   1.419 +      }
   1.420 +      minLastAudioPacketTime = std::min(minLastAudioPacketTime, stream->mLastAudioPacketTime);
   1.421 +      endPosition = std::max(endPosition,
   1.422 +          TicksToTimeRoundDown(mInfo.mAudio.mRate, stream->mAudioFramesWritten));
   1.423 +    }
   1.424 +
   1.425 +    if (mInfo.HasVideo()) {
   1.426 +      nsAutoTArray<VideoData*,10> video;
    1.427 +      // It's OK to hold references to the VideoData because only the decoder
    1.428 +      // thread pops from the queue.
   1.429 +      VideoQueue().GetElementsAfter(stream->mNextVideoTime, &video);
   1.430 +      VideoSegment output;
   1.431 +      for (uint32_t i = 0; i < video.Length(); ++i) {
   1.432 +        VideoData* v = video[i];
   1.433 +        if (stream->mNextVideoTime < v->mTime) {
   1.434 +          VERBOSE_LOG("writing last video to MediaStream %p for %lldus",
   1.435 +                      mediaStream, v->mTime - stream->mNextVideoTime);
    1.436 +          // Write the last video frame to catch up. mLastVideoImage can be null
    1.437 +          // here, which is fine; it just means there's no video.
   1.438 +          WriteVideoToMediaStream(stream->mLastVideoImage,
   1.439 +            v->mTime - stream->mNextVideoTime, stream->mLastVideoImageDisplaySize,
   1.440 +              &output);
   1.441 +          stream->mNextVideoTime = v->mTime;
   1.442 +        }
   1.443 +        if (stream->mNextVideoTime < v->GetEndTime()) {
   1.444 +          VERBOSE_LOG("writing video frame %lldus to MediaStream %p for %lldus",
   1.445 +                      v->mTime, mediaStream, v->GetEndTime() - stream->mNextVideoTime);
   1.446 +          WriteVideoToMediaStream(v->mImage,
   1.447 +              v->GetEndTime() - stream->mNextVideoTime, v->mDisplay,
   1.448 +              &output);
   1.449 +          stream->mNextVideoTime = v->GetEndTime();
   1.450 +          stream->mLastVideoImage = v->mImage;
   1.451 +          stream->mLastVideoImageDisplaySize = v->mDisplay;
   1.452 +        } else {
   1.453 +          VERBOSE_LOG("skipping writing video frame %lldus (end %lldus) to MediaStream",
   1.454 +                      v->mTime, v->GetEndTime());
   1.455 +        }
   1.456 +      }
   1.457 +      if (output.GetDuration() > 0) {
   1.458 +        mediaStream->AppendToTrack(TRACK_VIDEO, &output);
   1.459 +      }
   1.460 +      if (VideoQueue().IsFinished() && !stream->mHaveSentFinishVideo) {
   1.461 +        mediaStream->EndTrack(TRACK_VIDEO);
   1.462 +        stream->mHaveSentFinishVideo = true;
   1.463 +      }
   1.464 +      endPosition = std::max(endPosition,
   1.465 +          TicksToTimeRoundDown(RATE_VIDEO, stream->mNextVideoTime - stream->mInitialTime));
   1.466 +    }
   1.467 +
   1.468 +    if (!stream->mHaveSentFinish) {
   1.469 +      stream->mStream->AdvanceKnownTracksTime(endPosition);
   1.470 +    }
   1.471 +
   1.472 +    if (finished && !stream->mHaveSentFinish) {
   1.473 +      stream->mHaveSentFinish = true;
   1.474 +      stream->mStream->Finish();
   1.475 +    }
   1.476 +  }
   1.477 +
   1.478 +  if (mAudioCaptured) {
   1.479 +    // Discard audio packets that are no longer needed.
   1.480 +    while (true) {
   1.481 +      const AudioData* a = AudioQueue().PeekFront();
    1.482 +      // Packet times are not 100% reliable, so this may discard packets that
    1.483 +      // actually contain data for mCurrentFrameTime. This means that if someone
    1.484 +      // creates a new output stream, we might not actually have the audio for its
    1.485 +      // very start. That's OK; we'll play silence instead for a brief moment.
    1.486 +      // Seeking to this time would have a similar issue for such
    1.487 +      // badly muxed resources.
   1.488 +      if (!a || a->GetEndTime() >= minLastAudioPacketTime)
   1.489 +        break;
   1.490 +      mAudioEndTime = std::max(mAudioEndTime, a->GetEndTime());
   1.491 +      delete AudioQueue().PopFront();
   1.492 +    }
   1.493 +
   1.494 +    if (finished) {
   1.495 +      mAudioCompleted = true;
   1.496 +      UpdateReadyState();
   1.497 +    }
   1.498 +  }
   1.499 +}
   1.500 +
   1.501 +MediaDecoderStateMachine::WakeDecoderRunnable*
   1.502 +MediaDecoderStateMachine::GetWakeDecoderRunnable()
   1.503 +{
   1.504 +  AssertCurrentThreadInMonitor();
   1.505 +
   1.506 +  if (!mPendingWakeDecoder.get()) {
   1.507 +    mPendingWakeDecoder = new WakeDecoderRunnable(this);
   1.508 +  }
   1.509 +  return mPendingWakeDecoder.get();
   1.510 +}
   1.511 +
   1.512 +bool MediaDecoderStateMachine::HaveEnoughDecodedAudio(int64_t aAmpleAudioUSecs)
   1.513 +{
   1.514 +  AssertCurrentThreadInMonitor();
   1.515 +
   1.516 +  if (AudioQueue().GetSize() == 0 ||
   1.517 +      GetDecodedAudioDuration() < aAmpleAudioUSecs) {
   1.518 +    return false;
   1.519 +  }
   1.520 +  if (!mAudioCaptured) {
   1.521 +    return true;
   1.522 +  }
   1.523 +
   1.524 +  DecodedStreamData* stream = mDecoder->GetDecodedStream();
   1.525 +  if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishAudio) {
   1.526 +    if (!stream->mStream->HaveEnoughBuffered(TRACK_AUDIO)) {
   1.527 +      return false;
   1.528 +    }
   1.529 +    stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_AUDIO,
   1.530 +        GetStateMachineThread(), GetWakeDecoderRunnable());
   1.531 +  }
   1.532 +
   1.533 +  return true;
   1.534 +}
   1.535 +
   1.536 +bool MediaDecoderStateMachine::HaveEnoughDecodedVideo()
   1.537 +{
   1.538 +  AssertCurrentThreadInMonitor();
   1.539 +
   1.540 +  if (static_cast<uint32_t>(VideoQueue().GetSize()) < mAmpleVideoFrames * mPlaybackRate) {
   1.541 +    return false;
   1.542 +  }
   1.543 +
   1.544 +  DecodedStreamData* stream = mDecoder->GetDecodedStream();
   1.545 +  if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishVideo) {
   1.546 +    if (!stream->mStream->HaveEnoughBuffered(TRACK_VIDEO)) {
   1.547 +      return false;
   1.548 +    }
   1.549 +    stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_VIDEO,
   1.550 +        GetStateMachineThread(), GetWakeDecoderRunnable());
   1.551 +  }
   1.552 +
   1.553 +  return true;
   1.554 +}
   1.555 +
   1.556 +bool
   1.557 +MediaDecoderStateMachine::NeedToDecodeVideo()
   1.558 +{
   1.559 +  AssertCurrentThreadInMonitor();
   1.560 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
   1.561 +               "Should be on state machine or decode thread.");
   1.562 +  return mIsVideoDecoding &&
   1.563 +         !mMinimizePreroll &&
   1.564 +         !HaveEnoughDecodedVideo();
   1.565 +}
   1.566 +
   1.567 +void
   1.568 +MediaDecoderStateMachine::DecodeVideo()
   1.569 +{
   1.570 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   1.571 +  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
   1.572 +
   1.573 +  if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) {
   1.574 +    mDispatchedVideoDecodeTask = false;
   1.575 +    return;
   1.576 +  }
   1.577 +  EnsureActive();
   1.578 +
   1.579 +  // We don't want to consider skipping to the next keyframe if we've
   1.580 +  // only just started up the decode loop, so wait until we've decoded
   1.581 +  // some frames before enabling the keyframe skip logic on video.
   1.582 +  if (mIsVideoPrerolling &&
   1.583 +      (static_cast<uint32_t>(VideoQueue().GetSize())
   1.584 +        >= mVideoPrerollFrames * mPlaybackRate))
   1.585 +  {
   1.586 +    mIsVideoPrerolling = false;
   1.587 +  }
   1.588 +
   1.589 +  // We'll skip the video decode to the nearest keyframe if we're low on
   1.590 +  // audio, or if we're low on video, provided we're not running low on
   1.591 +  // data to decode. If we're running low on downloaded data to decode,
   1.592 +  // we won't start keyframe skipping, as we'll be pausing playback to buffer
   1.593 +  // soon anyway and we'll want to be able to display frames immediately
   1.594 +  // after buffering finishes.
   1.595 +  if (mState == DECODER_STATE_DECODING &&
   1.596 +      !mSkipToNextKeyFrame &&
   1.597 +      mIsVideoDecoding &&
   1.598 +      ((!mIsAudioPrerolling && mIsAudioDecoding &&
   1.599 +        GetDecodedAudioDuration() < mLowAudioThresholdUsecs * mPlaybackRate) ||
   1.600 +        (!mIsVideoPrerolling && mIsVideoDecoding &&
    1.601 +         // Don't skip a frame when |clock time| <= |mVideoFrameEndTime|, since
    1.602 +         // we're still in the safe range and not underrunning video frames.
   1.603 +         GetClock() > mVideoFrameEndTime &&
   1.604 +        (static_cast<uint32_t>(VideoQueue().GetSize())
   1.605 +          < LOW_VIDEO_FRAMES * mPlaybackRate))) &&
   1.606 +      !HasLowUndecodedData())
   1.607 +  {
   1.608 +    mSkipToNextKeyFrame = true;
   1.609 +    DECODER_LOG(PR_LOG_DEBUG, "Skipping video decode to the next keyframe");
   1.610 +  }
   1.611 +
   1.612 +  // Time the video decode, so that if it's slow, we can increase our low
   1.613 +  // audio threshold to reduce the chance of an audio underrun while we're
   1.614 +  // waiting for a video decode to complete.
   1.615 +  TimeDuration decodeTime;
   1.616 +  {
   1.617 +    int64_t currentTime = GetMediaTime();
   1.618 +    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
   1.619 +    TimeStamp start = TimeStamp::Now();
   1.620 +    mIsVideoDecoding = mReader->DecodeVideoFrame(mSkipToNextKeyFrame, currentTime);
   1.621 +    decodeTime = TimeStamp::Now() - start;
   1.622 +  }
   1.623 +  if (!mIsVideoDecoding) {
   1.624 +    // Playback ended for this stream, close the sample queue.
   1.625 +    VideoQueue().Finish();
   1.626 +    CheckIfDecodeComplete();
   1.627 +  }
   1.628 +
   1.629 +  if (THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > mLowAudioThresholdUsecs &&
   1.630 +      !HasLowUndecodedData())
   1.631 +  {
   1.632 +    mLowAudioThresholdUsecs =
   1.633 +      std::min(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), AMPLE_AUDIO_USECS);
   1.634 +    mAmpleAudioThresholdUsecs = std::max(THRESHOLD_FACTOR * mLowAudioThresholdUsecs,
   1.635 +                                          mAmpleAudioThresholdUsecs);
   1.636 +    DECODER_LOG(PR_LOG_DEBUG, "Slow video decode, set mLowAudioThresholdUsecs=%lld mAmpleAudioThresholdUsecs=%lld",
   1.637 +                mLowAudioThresholdUsecs, mAmpleAudioThresholdUsecs);
   1.638 +  }
   1.639 +
   1.640 +  SendStreamData();
   1.641 +
   1.642 +  // The ready state can change when we've decoded data, so update the
   1.643 +  // ready state, so that DOM events can fire.
   1.644 +  UpdateReadyState();
   1.645 +
   1.646 +  mDispatchedVideoDecodeTask = false;
   1.647 +  DispatchDecodeTasksIfNeeded();
   1.648 +}
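          +// Worked example of the threshold adjustment above (hypothetical timing): if a
          +// video frame takes 200 ms to decode and we're not low on undecoded data,
          +// THRESHOLD_FACTOR * DurationToUsecs(decodeTime) = 400000 exceeds the default
          +// low threshold of 300000, so mLowAudioThresholdUsecs becomes
          +// min(400000, AMPLE_AUDIO_USECS) = 400000 and mAmpleAudioThresholdUsecs stays
          +// at max(2 * 400000, 1000000) = 1000000.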
   1.649 +
   1.650 +bool
   1.651 +MediaDecoderStateMachine::NeedToDecodeAudio()
   1.652 +{
   1.653 +  AssertCurrentThreadInMonitor();
   1.654 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
   1.655 +               "Should be on state machine or decode thread.");
   1.656 +  return mIsAudioDecoding &&
   1.657 +         !mMinimizePreroll &&
   1.658 +         !HaveEnoughDecodedAudio(mAmpleAudioThresholdUsecs * mPlaybackRate);
   1.659 +}
   1.660 +
   1.661 +void
   1.662 +MediaDecoderStateMachine::DecodeAudio()
   1.663 +{
   1.664 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   1.665 +  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
   1.666 +
   1.667 +  if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) {
   1.668 +    mDispatchedAudioDecodeTask = false;
   1.669 +    return;
   1.670 +  }
   1.671 +  EnsureActive();
   1.672 +
   1.673 +  // We don't want to consider skipping to the next keyframe if we've
   1.674 +  // only just started up the decode loop, so wait until we've decoded
   1.675 +  // some audio data before enabling the keyframe skip logic on audio.
   1.676 +  if (mIsAudioPrerolling &&
   1.677 +      GetDecodedAudioDuration() >= mAudioPrerollUsecs * mPlaybackRate) {
   1.678 +    mIsAudioPrerolling = false;
   1.679 +  }
   1.680 +
   1.681 +  {
   1.682 +    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
   1.683 +    mIsAudioDecoding = mReader->DecodeAudioData();
   1.684 +  }
   1.685 +  if (!mIsAudioDecoding) {
   1.686 +    // Playback ended for this stream, close the sample queue.
   1.687 +    AudioQueue().Finish();
   1.688 +    CheckIfDecodeComplete();
   1.689 +  }
   1.690 +
   1.691 +  SendStreamData();
   1.692 +
   1.693 +  // Notify to ensure that the AudioLoop() is not waiting, in case it was
   1.694 +  // waiting for more audio to be decoded.
   1.695 +  mDecoder->GetReentrantMonitor().NotifyAll();
   1.696 +
   1.697 +  // The ready state can change when we've decoded data, so update the
   1.698 +  // ready state, so that DOM events can fire.
   1.699 +  UpdateReadyState();
   1.700 +
   1.701 +  mDispatchedAudioDecodeTask = false;
   1.702 +  DispatchDecodeTasksIfNeeded();
   1.703 +}
   1.704 +
   1.705 +void
   1.706 +MediaDecoderStateMachine::CheckIfDecodeComplete()
   1.707 +{
   1.708 +  AssertCurrentThreadInMonitor();
   1.709 +  if (mState == DECODER_STATE_SHUTDOWN ||
   1.710 +      mState == DECODER_STATE_SEEKING ||
   1.711 +      mState == DECODER_STATE_COMPLETED) {
   1.712 +    // Don't change our state if we've already been shutdown, or we're seeking,
   1.713 +    // since we don't want to abort the shutdown or seek processes.
   1.714 +    return;
   1.715 +  }
   1.716 +  MOZ_ASSERT(!AudioQueue().IsFinished() || !mIsAudioDecoding);
   1.717 +  MOZ_ASSERT(!VideoQueue().IsFinished() || !mIsVideoDecoding);
   1.718 +  if (!mIsVideoDecoding && !mIsAudioDecoding) {
   1.719 +    // We've finished decoding all active streams,
   1.720 +    // so move to COMPLETED state.
   1.721 +    mState = DECODER_STATE_COMPLETED;
   1.722 +    DispatchDecodeTasksIfNeeded();
   1.723 +    ScheduleStateMachine();
   1.724 +  }
   1.725 +  DECODER_LOG(PR_LOG_DEBUG, "CheckIfDecodeComplete %scompleted",
   1.726 +              ((mState == DECODER_STATE_COMPLETED) ? "" : "NOT "));
   1.727 +}
   1.728 +
   1.729 +bool MediaDecoderStateMachine::IsPlaying()
   1.730 +{
   1.731 +  AssertCurrentThreadInMonitor();
   1.732 +
   1.733 +  return !mPlayStartTime.IsNull();
   1.734 +}
   1.735 +
   1.736 +// If we have already written enough frames to the AudioStream, start the
   1.737 +// playback.
   1.738 +static void
   1.739 +StartAudioStreamPlaybackIfNeeded(AudioStream* aStream)
   1.740 +{
   1.741 +  // We want to have enough data in the buffer to start the stream.
   1.742 +  if (static_cast<double>(aStream->GetWritten()) / aStream->GetRate() >=
   1.743 +      static_cast<double>(AUDIOSTREAM_MIN_WRITE_BEFORE_START_USECS) / USECS_PER_S) {
   1.744 +    aStream->Start();
   1.745 +  }
   1.746 +}
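          +// For instance (assuming a 48000 Hz stream), playback starts once roughly
          +// 48000 * 200000 / 1e6 = 9600 frames (200 ms of audio) have been written;
          +// media too short to ever hit that mark are started explicitly at the end of
          +// AudioLoop() instead.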
   1.747 +
   1.748 +static void WriteSilence(AudioStream* aStream, uint32_t aFrames)
   1.749 +{
   1.750 +  uint32_t numSamples = aFrames * aStream->GetChannels();
   1.751 +  nsAutoTArray<AudioDataValue, 1000> buf;
   1.752 +  buf.SetLength(numSamples);
   1.753 +  memset(buf.Elements(), 0, numSamples * sizeof(AudioDataValue));
   1.754 +  aStream->Write(buf.Elements(), aFrames);
   1.755 +
   1.756 +  StartAudioStreamPlaybackIfNeeded(aStream);
   1.757 +}
   1.758 +
   1.759 +void MediaDecoderStateMachine::AudioLoop()
   1.760 +{
   1.761 +  NS_ASSERTION(OnAudioThread(), "Should be on audio thread.");
   1.762 +  DECODER_LOG(PR_LOG_DEBUG, "Begun audio thread/loop");
   1.763 +  int64_t audioDuration = 0;
   1.764 +  int64_t audioStartTime = -1;
   1.765 +  uint32_t channels, rate;
   1.766 +  double volume = -1;
   1.767 +  bool setVolume;
   1.768 +  double playbackRate = -1;
   1.769 +  bool setPlaybackRate;
   1.770 +  bool preservesPitch;
   1.771 +  bool setPreservesPitch;
   1.772 +  AudioChannel audioChannel;
   1.773 +
   1.774 +  {
   1.775 +    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   1.776 +    mAudioCompleted = false;
   1.777 +    audioStartTime = mAudioStartTime;
   1.778 +    NS_ASSERTION(audioStartTime != -1, "Should have audio start time by now");
   1.779 +    channels = mInfo.mAudio.mChannels;
   1.780 +    rate = mInfo.mAudio.mRate;
   1.781 +
   1.782 +    audioChannel = mDecoder->GetAudioChannel();
   1.783 +    volume = mVolume;
   1.784 +    preservesPitch = mPreservesPitch;
   1.785 +    playbackRate = mPlaybackRate;
   1.786 +  }
   1.787 +
   1.788 +  {
   1.789 +    // AudioStream initialization can block for extended periods in unusual
   1.790 +    // circumstances, so we take care to drop the decoder monitor while
   1.791 +    // initializing.
   1.792 +    RefPtr<AudioStream> audioStream(new AudioStream());
   1.793 +    audioStream->Init(channels, rate, audioChannel, AudioStream::HighLatency);
   1.794 +    audioStream->SetVolume(volume);
   1.795 +    if (audioStream->SetPreservesPitch(preservesPitch) != NS_OK) {
   1.796 +      NS_WARNING("Setting the pitch preservation failed at AudioLoop start.");
   1.797 +    }
   1.798 +    if (playbackRate != 1.0) {
   1.799 +      NS_ASSERTION(playbackRate != 0,
   1.800 +                   "Don't set the playbackRate to 0 on an AudioStream.");
   1.801 +      if (audioStream->SetPlaybackRate(playbackRate) != NS_OK) {
   1.802 +        NS_WARNING("Setting the playback rate failed at AudioLoop start.");
   1.803 +      }
   1.804 +    }
   1.805 +
   1.806 +    {
   1.807 +      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   1.808 +      mAudioStream = audioStream.forget();
   1.809 +    }
   1.810 +  }
   1.811 +
   1.812 +  while (1) {
    1.813 +    // Wait while we're not shutting down and either we're not playing, we're
    1.814 +    // buffering, or we're playing but have no decoded audio ready to play.
   1.815 +    {
   1.816 +      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   1.817 +      NS_ASSERTION(mState != DECODER_STATE_DECODING_METADATA,
   1.818 +                   "Should have meta data before audio started playing.");
   1.819 +      while (mState != DECODER_STATE_SHUTDOWN &&
   1.820 +             !mStopAudioThread &&
   1.821 +             (!IsPlaying() ||
   1.822 +              mState == DECODER_STATE_BUFFERING ||
   1.823 +              (AudioQueue().GetSize() == 0 &&
   1.824 +               !AudioQueue().AtEndOfStream())))
   1.825 +      {
   1.826 +        if (!IsPlaying() && !mAudioStream->IsPaused()) {
   1.827 +          mAudioStream->Pause();
   1.828 +        }
   1.829 +        mon.Wait();
   1.830 +      }
   1.831 +
   1.832 +      // If we're shutting down, break out and exit the audio thread.
   1.833 +      // Also break out if audio is being captured.
   1.834 +      if (mState == DECODER_STATE_SHUTDOWN ||
   1.835 +          mStopAudioThread ||
   1.836 +          AudioQueue().AtEndOfStream())
   1.837 +      {
   1.838 +        break;
   1.839 +      }
   1.840 +
   1.841 +      // We only want to go to the expense of changing the volume if
   1.842 +      // the volume has changed.
   1.843 +      setVolume = volume != mVolume;
   1.844 +      volume = mVolume;
   1.845 +
   1.846 +      // Same for the playbackRate.
   1.847 +      setPlaybackRate = playbackRate != mPlaybackRate;
   1.848 +      playbackRate = mPlaybackRate;
   1.849 +
   1.850 +      // Same for the pitch preservation.
   1.851 +      setPreservesPitch = preservesPitch != mPreservesPitch;
   1.852 +      preservesPitch = mPreservesPitch;
   1.853 +
   1.854 +      if (IsPlaying() && mAudioStream->IsPaused()) {
   1.855 +        mAudioStream->Resume();
   1.856 +      }
   1.857 +    }
   1.858 +
   1.859 +    if (setVolume) {
   1.860 +      mAudioStream->SetVolume(volume);
   1.861 +    }
   1.862 +    if (setPlaybackRate) {
   1.863 +      NS_ASSERTION(playbackRate != 0,
   1.864 +                   "Don't set the playbackRate to 0 in the AudioStreams");
   1.865 +      if (mAudioStream->SetPlaybackRate(playbackRate) != NS_OK) {
   1.866 +        NS_WARNING("Setting the playback rate failed in AudioLoop.");
   1.867 +      }
   1.868 +    }
   1.869 +    if (setPreservesPitch) {
   1.870 +      if (mAudioStream->SetPreservesPitch(preservesPitch) != NS_OK) {
   1.871 +        NS_WARNING("Setting the pitch preservation failed in AudioLoop.");
   1.872 +      }
   1.873 +    }
   1.874 +    NS_ASSERTION(AudioQueue().GetSize() > 0,
   1.875 +                 "Should have data to play");
   1.876 +    // See if there's a gap in the audio. If there is, push silence into the
   1.877 +    // audio hardware, so we can play across the gap.
   1.878 +    const AudioData* s = AudioQueue().PeekFront();
   1.879 +
   1.880 +    // Calculate the number of frames that have been pushed onto the audio
   1.881 +    // hardware.
   1.882 +    CheckedInt64 playedFrames = UsecsToFrames(audioStartTime, rate) +
   1.883 +                                              audioDuration;
   1.884 +    // Calculate the timestamp of the next chunk of audio in numbers of
   1.885 +    // samples.
   1.886 +    CheckedInt64 sampleTime = UsecsToFrames(s->mTime, rate);
   1.887 +    CheckedInt64 missingFrames = sampleTime - playedFrames;
   1.888 +    if (!missingFrames.isValid() || !sampleTime.isValid()) {
   1.889 +      NS_WARNING("Int overflow adding in AudioLoop()");
   1.890 +      break;
   1.891 +    }
   1.892 +
   1.893 +    int64_t framesWritten = 0;
   1.894 +    if (missingFrames.value() > 0) {
   1.895 +      // The next audio chunk begins some time after the end of the last chunk
   1.896 +      // we pushed to the audio hardware. We must push silence into the audio
   1.897 +      // hardware so that the next audio chunk begins playback at the correct
   1.898 +      // time.
   1.899 +      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
   1.900 +      VERBOSE_LOG("playing %d frames of silence", int32_t(missingFrames.value()));
   1.901 +      framesWritten = PlaySilence(static_cast<uint32_t>(missingFrames.value()),
   1.902 +                                  channels, playedFrames.value());
   1.903 +    } else {
   1.904 +      framesWritten = PlayFromAudioQueue(sampleTime.value(), channels);
   1.905 +    }
   1.906 +    audioDuration += framesWritten;
   1.907 +    {
   1.908 +      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   1.909 +      CheckedInt64 playedUsecs = FramesToUsecs(audioDuration, rate) + audioStartTime;
   1.910 +      if (!playedUsecs.isValid()) {
   1.911 +        NS_WARNING("Int overflow calculating audio end time");
   1.912 +        break;
   1.913 +      }
   1.914 +      mAudioEndTime = playedUsecs.value();
   1.915 +    }
   1.916 +  }
   1.917 +  {
   1.918 +    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   1.919 +    if (AudioQueue().AtEndOfStream() &&
   1.920 +        mState != DECODER_STATE_SHUTDOWN &&
   1.921 +        !mStopAudioThread)
   1.922 +    {
   1.923 +      // If the media was too short to trigger the start of the audio stream,
   1.924 +      // start it now.
   1.925 +      mAudioStream->Start();
    1.926 +      // The last frame has been pushed to the audio hardware; wait for the audio
    1.927 +      // to finish before the audio thread terminates.
   1.928 +      bool seeking = false;
   1.929 +      {
   1.930 +        int64_t oldPosition = -1;
   1.931 +        int64_t position = GetMediaTime();
   1.932 +        while (oldPosition != position &&
   1.933 +               mAudioEndTime - position > 0 &&
   1.934 +               mState != DECODER_STATE_SEEKING &&
   1.935 +               mState != DECODER_STATE_SHUTDOWN)
   1.936 +        {
   1.937 +          const int64_t DRAIN_BLOCK_USECS = 100000;
   1.938 +          Wait(std::min(mAudioEndTime - position, DRAIN_BLOCK_USECS));
   1.939 +          oldPosition = position;
   1.940 +          position = GetMediaTime();
   1.941 +        }
   1.942 +        seeking = mState == DECODER_STATE_SEEKING;
   1.943 +      }
   1.944 +
   1.945 +      if (!seeking && !mAudioStream->IsPaused()) {
   1.946 +        {
   1.947 +          ReentrantMonitorAutoExit exit(mDecoder->GetReentrantMonitor());
   1.948 +          mAudioStream->Drain();
   1.949 +        }
   1.950 +      }
   1.951 +    }
   1.952 +  }
   1.953 +  DECODER_LOG(PR_LOG_DEBUG, "Reached audio stream end.");
   1.954 +  {
    1.955 +    // Must hold the lock while shutting down and nulling out the audio stream to
    1.956 +    // prevent the state machine thread from trying to use it while we're destroying it.
   1.957 +    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   1.958 +    mAudioStream->Shutdown();
   1.959 +    mAudioStream = nullptr;
   1.960 +    if (!mAudioCaptured) {
   1.961 +      mAudioCompleted = true;
   1.962 +      UpdateReadyState();
   1.963 +      // Kick the decode thread; it may be sleeping waiting for this to finish.
   1.964 +      mDecoder->GetReentrantMonitor().NotifyAll();
   1.965 +    }
   1.966 +  }
   1.967 +
   1.968 +  DECODER_LOG(PR_LOG_DEBUG, "Audio stream finished playing, audio thread exit");
   1.969 +}
   1.970 +
   1.971 +uint32_t MediaDecoderStateMachine::PlaySilence(uint32_t aFrames,
   1.972 +                                                   uint32_t aChannels,
   1.973 +                                                   uint64_t aFrameOffset)
   1.974 +
   1.975 +{
   1.976 +  NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
   1.977 +  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
   1.978 +  uint32_t maxFrames = SILENCE_BYTES_CHUNK / aChannels / sizeof(AudioDataValue);
   1.979 +  uint32_t frames = std::min(aFrames, maxFrames);
   1.980 +  WriteSilence(mAudioStream, frames);
   1.981 +  return frames;
   1.982 +}
   1.983 +
   1.984 +uint32_t MediaDecoderStateMachine::PlayFromAudioQueue(uint64_t aFrameOffset,
   1.985 +                                                      uint32_t aChannels)
   1.986 +{
   1.987 +  NS_ASSERTION(OnAudioThread(), "Only call on audio thread.");
   1.988 +  NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
   1.989 +  nsAutoPtr<AudioData> audio(AudioQueue().PopFront());
   1.990 +  {
   1.991 +    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
   1.992 +    NS_WARN_IF_FALSE(IsPlaying(), "Should be playing");
   1.993 +    // Awaken the decode loop if it's waiting for space to free up in the
   1.994 +    // audio queue.
   1.995 +    mDecoder->GetReentrantMonitor().NotifyAll();
   1.996 +  }
   1.997 +  int64_t offset = -1;
   1.998 +  uint32_t frames = 0;
   1.999 +  VERBOSE_LOG("playing %d frames of data to stream for AudioData at %lld",
  1.1000 +              audio->mFrames, audio->mTime);
  1.1001 +  mAudioStream->Write(audio->mAudioData,
  1.1002 +                      audio->mFrames);
  1.1003 +
  1.1004 +  aChannels = mAudioStream->GetOutChannels();
  1.1005 +
  1.1006 +  StartAudioStreamPlaybackIfNeeded(mAudioStream);
  1.1007 +
  1.1008 +  offset = audio->mOffset;
  1.1009 +  frames = audio->mFrames;
  1.1010 +
  1.1011 +  if (offset != -1) {
  1.1012 +    mDecoder->UpdatePlaybackOffset(offset);
  1.1013 +  }
  1.1014 +  return frames;
  1.1015 +}
  1.1016 +
  1.1017 +nsresult MediaDecoderStateMachine::Init(MediaDecoderStateMachine* aCloneDonor)
  1.1018 +{
  1.1019 +  MOZ_ASSERT(NS_IsMainThread());
  1.1020 +
  1.1021 +  RefPtr<SharedThreadPool> decodePool(
  1.1022 +    SharedThreadPool::Get(NS_LITERAL_CSTRING("Media Decode"),
  1.1023 +                          Preferences::GetUint("media.num-decode-threads", 25)));
  1.1024 +  NS_ENSURE_TRUE(decodePool, NS_ERROR_FAILURE);
  1.1025 +
  1.1026 +  RefPtr<SharedThreadPool> stateMachinePool(
  1.1027 +    SharedThreadPool::Get(NS_LITERAL_CSTRING("Media State Machine"), 1));
  1.1028 +  NS_ENSURE_TRUE(stateMachinePool, NS_ERROR_FAILURE);
  1.1029 +
  1.1030 +  mDecodeTaskQueue = new MediaTaskQueue(decodePool.forget());
  1.1031 +  NS_ENSURE_TRUE(mDecodeTaskQueue, NS_ERROR_FAILURE);
  1.1032 +
  1.1033 +  MediaDecoderReader* cloneReader = nullptr;
  1.1034 +  if (aCloneDonor) {
  1.1035 +    cloneReader = aCloneDonor->mReader;
  1.1036 +  }
  1.1037 +
  1.1038 +  mStateMachineThreadPool = stateMachinePool;
  1.1039 +
  1.1040 +  nsresult rv;
  1.1041 +  mTimer = do_CreateInstance("@mozilla.org/timer;1", &rv);
  1.1042 +  NS_ENSURE_SUCCESS(rv, rv);
  1.1043 +  rv = mTimer->SetTarget(GetStateMachineThread());
  1.1044 +  NS_ENSURE_SUCCESS(rv, rv);
  1.1045 +
  1.1046 +  return mReader->Init(cloneReader);
  1.1047 +}
  1.1048 +
  1.1049 +void MediaDecoderStateMachine::StopPlayback()
  1.1050 +{
  1.1051 +  DECODER_LOG(PR_LOG_DEBUG, "StopPlayback()");
  1.1052 +
  1.1053 +  AssertCurrentThreadInMonitor();
  1.1054 +
  1.1055 +  mDecoder->NotifyPlaybackStopped();
  1.1056 +
  1.1057 +  if (IsPlaying()) {
  1.1058 +    mPlayDuration = GetClock();
  1.1059 +    mPlayStartTime = TimeStamp();
  1.1060 +  }
   1.1061 +  // Notify the audio thread so that it notices we've stopped playing and
   1.1062 +  // can pause audio playback.
  1.1063 +  mDecoder->GetReentrantMonitor().NotifyAll();
  1.1064 +  NS_ASSERTION(!IsPlaying(), "Should report not playing at end of StopPlayback()");
  1.1065 +  mDecoder->UpdateStreamBlockingForStateMachinePlaying();
  1.1066 +
  1.1067 +  DispatchDecodeTasksIfNeeded();
  1.1068 +}
  1.1069 +
  1.1070 +void MediaDecoderStateMachine::SetSyncPointForMediaStream()
  1.1071 +{
  1.1072 +  AssertCurrentThreadInMonitor();
  1.1073 +
  1.1074 +  DecodedStreamData* stream = mDecoder->GetDecodedStream();
  1.1075 +  if (!stream) {
  1.1076 +    return;
  1.1077 +  }
  1.1078 +
  1.1079 +  mSyncPointInMediaStream = stream->GetLastOutputTime();
  1.1080 +  mSyncPointInDecodedStream = mStartTime + mPlayDuration;
  1.1081 +}
  1.1082 +
  1.1083 +int64_t MediaDecoderStateMachine::GetCurrentTimeViaMediaStreamSync()
  1.1084 +{
  1.1085 +  AssertCurrentThreadInMonitor();
  1.1086 +  NS_ASSERTION(mSyncPointInDecodedStream >= 0, "Should have set up sync point");
  1.1087 +  DecodedStreamData* stream = mDecoder->GetDecodedStream();
  1.1088 +  StreamTime streamDelta = stream->GetLastOutputTime() - mSyncPointInMediaStream;
  1.1089 +  return mSyncPointInDecodedStream + MediaTimeToMicroseconds(streamDelta);
  1.1090 +}
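          +// Illustrative numbers (hypothetical): if the sync point was taken at
          +// 2000000 usecs into the decoded stream and the MediaStream has since output
          +// 500000 usecs more than mSyncPointInMediaStream, this reports 2500000 usecs.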
  1.1091 +
  1.1092 +void MediaDecoderStateMachine::StartPlayback()
  1.1093 +{
  1.1094 +  DECODER_LOG(PR_LOG_DEBUG, "StartPlayback()");
  1.1095 +
  1.1096 +  NS_ASSERTION(!IsPlaying(), "Shouldn't be playing when StartPlayback() is called");
  1.1097 +  AssertCurrentThreadInMonitor();
  1.1098 +
  1.1099 +  mDecoder->NotifyPlaybackStarted();
  1.1100 +  mPlayStartTime = TimeStamp::Now();
  1.1101 +
  1.1102 +  NS_ASSERTION(IsPlaying(), "Should report playing by end of StartPlayback()");
  1.1103 +  if (NS_FAILED(StartAudioThread())) {
  1.1104 +    NS_WARNING("Failed to create audio thread");
  1.1105 +  }
  1.1106 +  mDecoder->GetReentrantMonitor().NotifyAll();
  1.1107 +  mDecoder->UpdateStreamBlockingForStateMachinePlaying();
  1.1108 +}
  1.1109 +
  1.1110 +void MediaDecoderStateMachine::UpdatePlaybackPositionInternal(int64_t aTime)
  1.1111 +{
  1.1112 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1.1113 +               "Should be on state machine thread.");
  1.1114 +  AssertCurrentThreadInMonitor();
  1.1115 +
  1.1116 +  NS_ASSERTION(mStartTime >= 0, "Should have positive mStartTime");
  1.1117 +  mCurrentFrameTime = aTime - mStartTime;
  1.1118 +  NS_ASSERTION(mCurrentFrameTime >= 0, "CurrentTime should be positive!");
  1.1119 +  if (aTime > mEndTime) {
  1.1120 +    NS_ASSERTION(mCurrentFrameTime > GetDuration(),
  1.1121 +                 "CurrentTime must be after duration if aTime > endTime!");
  1.1122 +    mEndTime = aTime;
  1.1123 +    nsCOMPtr<nsIRunnable> event =
  1.1124 +      NS_NewRunnableMethod(mDecoder, &MediaDecoder::DurationChanged);
  1.1125 +    NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
  1.1126 +  }
  1.1127 +}
  1.1128 +
  1.1129 +void MediaDecoderStateMachine::UpdatePlaybackPosition(int64_t aTime)
  1.1130 +{
  1.1131 +  UpdatePlaybackPositionInternal(aTime);
  1.1132 +
  1.1133 +  bool fragmentEnded = mFragmentEndTime >= 0 && GetMediaTime() >= mFragmentEndTime;
  1.1134 +  if (!mPositionChangeQueued || fragmentEnded) {
  1.1135 +    mPositionChangeQueued = true;
  1.1136 +    nsCOMPtr<nsIRunnable> event =
  1.1137 +      NS_NewRunnableMethod(mDecoder, &MediaDecoder::PlaybackPositionChanged);
  1.1138 +    NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
  1.1139 +  }
  1.1140 +
  1.1141 +  mMetadataManager.DispatchMetadataIfNeeded(mDecoder, aTime);
  1.1142 +
  1.1143 +  if (fragmentEnded) {
  1.1144 +    StopPlayback();
  1.1145 +  }
  1.1146 +}
  1.1147 +
  1.1148 +void MediaDecoderStateMachine::ClearPositionChangeFlag()
  1.1149 +{
  1.1150 +  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1.1151 +  AssertCurrentThreadInMonitor();
  1.1152 +
  1.1153 +  mPositionChangeQueued = false;
  1.1154 +}
  1.1155 +
  1.1156 +MediaDecoderOwner::NextFrameStatus MediaDecoderStateMachine::GetNextFrameStatus()
  1.1157 +{
  1.1158 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1159 +  if (IsBuffering() || IsSeeking()) {
  1.1160 +    return MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING;
  1.1161 +  } else if (HaveNextFrameData()) {
  1.1162 +    return MediaDecoderOwner::NEXT_FRAME_AVAILABLE;
  1.1163 +  }
  1.1164 +  return MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE;
  1.1165 +}
  1.1166 +
  1.1167 +void MediaDecoderStateMachine::SetVolume(double volume)
  1.1168 +{
  1.1169 +  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1.1170 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1171 +  mVolume = volume;
  1.1172 +}
  1.1173 +
  1.1174 +void MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured)
  1.1175 +{
  1.1176 +  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1.1177 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1178 +  if (!mAudioCaptured && aCaptured && !mStopAudioThread) {
  1.1179 +    // Make sure the state machine runs as soon as possible. That will
  1.1180 +    // stop the audio thread.
  1.1181 +    // If mStopAudioThread is true then we're already stopping the audio thread
  1.1182 +    // and since we set mAudioCaptured to true, nothing can start it again.
  1.1183 +    ScheduleStateMachine();
  1.1184 +  }
  1.1185 +  mAudioCaptured = aCaptured;
  1.1186 +}
  1.1187 +
  1.1188 +double MediaDecoderStateMachine::GetCurrentTime() const
  1.1189 +{
  1.1190 +  NS_ASSERTION(NS_IsMainThread() ||
  1.1191 +               OnStateMachineThread() ||
  1.1192 +               OnDecodeThread(),
  1.1193 +               "Should be on main, decode, or state machine thread.");
  1.1194 +
  1.1195 +  return static_cast<double>(mCurrentFrameTime) / static_cast<double>(USECS_PER_S);
  1.1196 +}
  1.1197 +
  1.1198 +int64_t MediaDecoderStateMachine::GetDuration()
  1.1199 +{
  1.1200 +  AssertCurrentThreadInMonitor();
  1.1201 +
  1.1202 +  if (mEndTime == -1 || mStartTime == -1)
  1.1203 +    return -1;
  1.1204 +  return mEndTime - mStartTime;
  1.1205 +}
  1.1206 +
  1.1207 +void MediaDecoderStateMachine::SetDuration(int64_t aDuration)
  1.1208 +{
  1.1209 +  NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(),
  1.1210 +               "Should be on main or decode thread.");
  1.1211 +  AssertCurrentThreadInMonitor();
  1.1212 +
  1.1213 +  if (aDuration == -1) {
  1.1214 +    return;
  1.1215 +  }
  1.1216 +
  1.1217 +  if (mStartTime != -1) {
  1.1218 +    mEndTime = mStartTime + aDuration;
  1.1219 +  } else {
  1.1220 +    mStartTime = 0;
  1.1221 +    mEndTime = aDuration;
  1.1222 +  }
  1.1223 +}
  1.1224 +
  1.1225 +void MediaDecoderStateMachine::UpdateEstimatedDuration(int64_t aDuration)
  1.1226 +{
  1.1227 +  AssertCurrentThreadInMonitor();
  1.1228 +  int64_t duration = GetDuration();
  1.1229 +  if (aDuration != duration &&
  1.1230 +      abs(aDuration - duration) > ESTIMATED_DURATION_FUZZ_FACTOR_USECS) {
  1.1231 +    SetDuration(aDuration);
  1.1232 +    nsCOMPtr<nsIRunnable> event =
  1.1233 +      NS_NewRunnableMethod(mDecoder, &MediaDecoder::DurationChanged);
  1.1234 +    NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
  1.1235 +  }
  1.1236 +}
  1.1237 +
  1.1238 +void MediaDecoderStateMachine::SetMediaEndTime(int64_t aEndTime)
  1.1239 +{
  1.1240 +  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread");
  1.1241 +  AssertCurrentThreadInMonitor();
  1.1242 +
  1.1243 +  mEndTime = aEndTime;
  1.1244 +}
  1.1245 +
  1.1246 +void MediaDecoderStateMachine::SetFragmentEndTime(int64_t aEndTime)
  1.1247 +{
  1.1248 +  AssertCurrentThreadInMonitor();
  1.1249 +
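          +  // aEndTime is relative to the start of the media; a negative value
          +  // leaves the fragment end time unset.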
  1.1250 +  mFragmentEndTime = aEndTime < 0 ? aEndTime : aEndTime + mStartTime;
  1.1251 +}
  1.1252 +
  1.1253 +void MediaDecoderStateMachine::SetTransportSeekable(bool aTransportSeekable)
  1.1254 +{
  1.1255 +  NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(),
  1.1256 +      "Should be on main thread or the decoder thread.");
  1.1257 +  AssertCurrentThreadInMonitor();
  1.1258 +
  1.1259 +  mTransportSeekable = aTransportSeekable;
  1.1260 +}
  1.1261 +
  1.1262 +void MediaDecoderStateMachine::SetMediaSeekable(bool aMediaSeekable)
  1.1263 +{
  1.1264 +  NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(),
  1.1265 +      "Should be on main thread or the decoder thread.");
  1.1266 +
  1.1267 +  mMediaSeekable = aMediaSeekable;
  1.1268 +}
  1.1269 +
  1.1270 +bool MediaDecoderStateMachine::IsDormantNeeded()
  1.1271 +{
  1.1272 +  return mReader->IsDormantNeeded();
  1.1273 +}
  1.1274 +
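          +// Moves the state machine into or out of the DORMANT state. While dormant,
          +// RunStateMachine() releases the reader's media resources; leaving dormant
          +// restarts decoding from the metadata stage at time 0.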
  1.1275 +void MediaDecoderStateMachine::SetDormant(bool aDormant)
  1.1276 +{
  1.1277 +  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1.1278 +  AssertCurrentThreadInMonitor();
  1.1279 +
  1.1280 +  if (!mReader) {
  1.1281 +    return;
  1.1282 +  }
  1.1283 +
  1.1284 +  if (aDormant) {
  1.1285 +    ScheduleStateMachine();
  1.1286 +    mState = DECODER_STATE_DORMANT;
  1.1287 +    mDecoder->GetReentrantMonitor().NotifyAll();
   1.1288 +  } else if (mState == DECODER_STATE_DORMANT) {
  1.1289 +    ScheduleStateMachine();
  1.1290 +    mStartTime = 0;
  1.1291 +    mCurrentFrameTime = 0;
  1.1292 +    mState = DECODER_STATE_DECODING_METADATA;
  1.1293 +    mDecoder->GetReentrantMonitor().NotifyAll();
  1.1294 +  }
  1.1295 +}
  1.1296 +
  1.1297 +void MediaDecoderStateMachine::Shutdown()
  1.1298 +{
  1.1299 +  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1.1300 +
  1.1301 +  // Once we've entered the shutdown state here there's no going back.
  1.1302 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1303 +
  1.1304 +  // Change state before issuing shutdown request to threads so those
  1.1305 +  // threads can start exiting cleanly during the Shutdown call.
  1.1306 +  DECODER_LOG(PR_LOG_DEBUG, "Changed state to SHUTDOWN");
  1.1307 +  ScheduleStateMachine();
  1.1308 +  mState = DECODER_STATE_SHUTDOWN;
  1.1309 +  mDecoder->GetReentrantMonitor().NotifyAll();
  1.1310 +}
  1.1311 +
  1.1312 +void MediaDecoderStateMachine::StartDecoding()
  1.1313 +{
  1.1314 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1.1315 +               "Should be on state machine or decode thread.");
  1.1316 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1317 +  if (mState == DECODER_STATE_DECODING) {
  1.1318 +    return;
  1.1319 +  }
  1.1320 +  mState = DECODER_STATE_DECODING;
  1.1321 +
  1.1322 +  mDecodeStartTime = TimeStamp::Now();
  1.1323 +
  1.1324 +  // Reset our "stream finished decoding" flags, so we try to decode all
  1.1325 +  // streams that we have when we start decoding.
  1.1326 +  mIsVideoDecoding = HasVideo() && !VideoQueue().IsFinished();
  1.1327 +  mIsAudioDecoding = HasAudio() && !AudioQueue().IsFinished();
  1.1328 +
  1.1329 +  CheckIfDecodeComplete();
  1.1330 +  if (mState == DECODER_STATE_COMPLETED) {
  1.1331 +    return;
  1.1332 +  }
  1.1333 +
  1.1334 +  // Reset other state to pristine values before starting decode.
  1.1335 +  mSkipToNextKeyFrame = false;
  1.1336 +  mIsAudioPrerolling = true;
  1.1337 +  mIsVideoPrerolling = true;
  1.1338 +
  1.1339 +  // Ensure that we've got tasks enqueued to decode data if we need to.
  1.1340 +  DispatchDecodeTasksIfNeeded();
  1.1341 +
  1.1342 +  ScheduleStateMachine();
  1.1343 +}
  1.1344 +
  1.1345 +void MediaDecoderStateMachine::StartWaitForResources()
  1.1346 +{
  1.1347 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1.1348 +               "Should be on state machine or decode thread.");
  1.1349 +  AssertCurrentThreadInMonitor();
  1.1350 +  mState = DECODER_STATE_WAIT_FOR_RESOURCES;
  1.1351 +}
  1.1352 +
  1.1353 +void MediaDecoderStateMachine::NotifyWaitingForResourcesStatusChanged()
  1.1354 +{
  1.1355 +  AssertCurrentThreadInMonitor();
  1.1356 +  if (mState != DECODER_STATE_WAIT_FOR_RESOURCES ||
  1.1357 +      mReader->IsWaitingMediaResources()) {
  1.1358 +    return;
  1.1359 +  }
   1.1360 +  // The reader is no longer waiting for resources (say, a hardware decoder),
   1.1361 +  // so we can now proceed to decode metadata.
  1.1362 +  mState = DECODER_STATE_DECODING_METADATA;
  1.1363 +  EnqueueDecodeMetadataTask();
  1.1364 +}
  1.1365 +
  1.1366 +void MediaDecoderStateMachine::Play()
  1.1367 +{
  1.1368 +  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1.1369 +  // When asked to play, switch to decoding state only if
  1.1370 +  // we are currently buffering. In other cases, we'll start playing anyway
  1.1371 +  // when the state machine notices the decoder's state change to PLAYING.
  1.1372 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1373 +  if (mState == DECODER_STATE_BUFFERING) {
  1.1374 +    DECODER_LOG(PR_LOG_DEBUG, "Changed state from BUFFERING to DECODING");
  1.1375 +    mState = DECODER_STATE_DECODING;
  1.1376 +    mDecodeStartTime = TimeStamp::Now();
  1.1377 +  }
  1.1378 +  // Once we start playing, we don't want to minimize our prerolling, as we
  1.1379 +  // assume the user is likely to want to keep playing in future.
  1.1380 +  mMinimizePreroll = false;
  1.1381 +  ScheduleStateMachine();
  1.1382 +}
  1.1383 +
  1.1384 +void MediaDecoderStateMachine::ResetPlayback()
  1.1385 +{
  1.1386 +  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  1.1387 +  mVideoFrameEndTime = -1;
  1.1388 +  mAudioStartTime = -1;
  1.1389 +  mAudioEndTime = -1;
  1.1390 +  mAudioCompleted = false;
  1.1391 +}
  1.1392 +
  1.1393 +void MediaDecoderStateMachine::NotifyDataArrived(const char* aBuffer,
   1.1394 +                                                 uint32_t aLength,
   1.1395 +                                                 int64_t aOffset)
  1.1396 +{
  1.1397 +  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
  1.1398 +  mReader->NotifyDataArrived(aBuffer, aLength, aOffset);
  1.1399 +
  1.1400 +  // While playing an unseekable stream of unknown duration, mEndTime is
  1.1401 +  // updated (in AdvanceFrame()) as we play. But if data is being downloaded
  1.1402 +  // faster than played, mEndTime won't reflect the end of playable data
  1.1403 +  // since we haven't played the frame at the end of buffered data. So update
  1.1404 +  // mEndTime here as new data is downloaded to prevent such a lag.
  1.1405 +  dom::TimeRanges buffered;
  1.1406 +  if (mDecoder->IsInfinite() &&
  1.1407 +      NS_SUCCEEDED(mDecoder->GetBuffered(&buffered)))
  1.1408 +  {
  1.1409 +    uint32_t length = 0;
  1.1410 +    buffered.GetLength(&length);
  1.1411 +    if (length) {
  1.1412 +      double end = 0;
  1.1413 +      buffered.End(length - 1, &end);
  1.1414 +      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1415 +      mEndTime = std::max<int64_t>(mEndTime, end * USECS_PER_S);
  1.1416 +    }
  1.1417 +  }
  1.1418 +}
  1.1419 +
  1.1420 +void MediaDecoderStateMachine::Seek(const SeekTarget& aTarget)
  1.1421 +{
  1.1422 +  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1.1423 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1424 +
   1.1425 +  // We need to be able to seek at both the transport level and the media
   1.1426 +  // level in order to seek.
  1.1427 +  if (!mMediaSeekable) {
  1.1428 +    return;
  1.1429 +  }
  1.1430 +  // MediaDecoder::mPlayState should be SEEKING while we seek, and
  1.1431 +  // in that case MediaDecoder shouldn't be calling us.
  1.1432 +  NS_ASSERTION(mState != DECODER_STATE_SEEKING,
  1.1433 +               "We shouldn't already be seeking");
  1.1434 +  NS_ASSERTION(mState >= DECODER_STATE_DECODING,
  1.1435 +               "We should have loaded metadata");
  1.1436 +
  1.1437 +  // Bound the seek time to be inside the media range.
  1.1438 +  NS_ASSERTION(mStartTime != -1, "Should know start time by now");
  1.1439 +  NS_ASSERTION(mEndTime != -1, "Should know end time by now");
  1.1440 +  int64_t seekTime = aTarget.mTime + mStartTime;
  1.1441 +  seekTime = std::min(seekTime, mEndTime);
  1.1442 +  seekTime = std::max(mStartTime, seekTime);
  1.1443 +  NS_ASSERTION(seekTime >= mStartTime && seekTime <= mEndTime,
  1.1444 +               "Can only seek in range [0,duration]");
  1.1445 +  mSeekTarget = SeekTarget(seekTime, aTarget.mType);
  1.1446 +
  1.1447 +  mBasePosition = seekTime - mStartTime;
  1.1448 +  DECODER_LOG(PR_LOG_DEBUG, "Changed state to SEEKING (to %lld)", mSeekTarget.mTime);
  1.1449 +  mState = DECODER_STATE_SEEKING;
  1.1450 +  if (mDecoder->GetDecodedStream()) {
  1.1451 +    mDecoder->RecreateDecodedStream(seekTime - mStartTime);
  1.1452 +  }
  1.1453 +  ScheduleStateMachine();
  1.1454 +}
  1.1455 +
  1.1456 +void MediaDecoderStateMachine::StopAudioThread()
  1.1457 +{
  1.1458 +  NS_ASSERTION(OnDecodeThread() ||
  1.1459 +               OnStateMachineThread(), "Should be on decode thread or state machine thread");
  1.1460 +  AssertCurrentThreadInMonitor();
  1.1461 +
  1.1462 +  if (mStopAudioThread) {
  1.1463 +    // Nothing to do, since the thread is already stopping
  1.1464 +    return;
  1.1465 +  }
  1.1466 +
  1.1467 +  mStopAudioThread = true;
  1.1468 +  mDecoder->GetReentrantMonitor().NotifyAll();
  1.1469 +  if (mAudioThread) {
  1.1470 +    DECODER_LOG(PR_LOG_DEBUG, "Shutdown audio thread");
  1.1471 +    {
  1.1472 +      ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.1473 +      mAudioThread->Shutdown();
  1.1474 +    }
  1.1475 +    mAudioThread = nullptr;
  1.1476 +    // Now that the audio thread is dead, try sending data to our MediaStream(s).
  1.1477 +    // That may have been waiting for the audio thread to stop.
  1.1478 +    SendStreamData();
  1.1479 +  }
  1.1480 +}
  1.1481 +
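          +// Dispatches a task to the decode task queue to read the media's metadata.
          +// Does nothing unless we're still in DECODER_STATE_DECODING_METADATA.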
  1.1482 +nsresult
  1.1483 +MediaDecoderStateMachine::EnqueueDecodeMetadataTask()
  1.1484 +{
  1.1485 +  AssertCurrentThreadInMonitor();
  1.1486 +
  1.1487 +  if (mState != DECODER_STATE_DECODING_METADATA) {
  1.1488 +    return NS_OK;
  1.1489 +  }
  1.1490 +  nsresult rv = mDecodeTaskQueue->Dispatch(
  1.1491 +    NS_NewRunnableMethod(this, &MediaDecoderStateMachine::CallDecodeMetadata));
  1.1492 +  NS_ENSURE_SUCCESS(rv, rv);
  1.1493 +
  1.1494 +  return NS_OK;
  1.1495 +}
  1.1496 +
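          +// If the reader has been set idle, marks it active again before we use it.
          +// The monitor is exited while calling into the reader.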
  1.1497 +void
  1.1498 +MediaDecoderStateMachine::EnsureActive()
  1.1499 +{
  1.1500 +  AssertCurrentThreadInMonitor();
  1.1501 +  MOZ_ASSERT(OnDecodeThread());
  1.1502 +  if (!mIsReaderIdle) {
  1.1503 +    return;
  1.1504 +  }
  1.1505 +  mIsReaderIdle = false;
  1.1506 +  {
  1.1507 +    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.1508 +    SetReaderActive();
  1.1509 +  }
  1.1510 +}
  1.1511 +
  1.1512 +void
  1.1513 +MediaDecoderStateMachine::SetReaderIdle()
  1.1514 +{
  1.1515 +#ifdef PR_LOGGING
  1.1516 +  {
  1.1517 +    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1518 +    DECODER_LOG(PR_LOG_DEBUG, "SetReaderIdle() audioQueue=%lld videoQueue=%lld",
  1.1519 +                GetDecodedAudioDuration(),
  1.1520 +                VideoQueue().Duration());
  1.1521 +  }
  1.1522 +#endif
  1.1523 +  MOZ_ASSERT(OnDecodeThread());
  1.1524 +  mReader->SetIdle();
  1.1525 +}
  1.1526 +
  1.1527 +void
  1.1528 +MediaDecoderStateMachine::SetReaderActive()
  1.1529 +{
  1.1530 +  DECODER_LOG(PR_LOG_DEBUG, "SetReaderActive()");
  1.1531 +  MOZ_ASSERT(OnDecodeThread());
  1.1532 +  mReader->SetActive();
  1.1533 +}
  1.1534 +
  1.1535 +void
  1.1536 +MediaDecoderStateMachine::DispatchDecodeTasksIfNeeded()
  1.1537 +{
  1.1538 +  AssertCurrentThreadInMonitor();
  1.1539 +
   1.1540 +  // NeedToDecodeAudio() can go from false to true while we hold the
   1.1541 +  // monitor, but it can't go from true to false. This is because
   1.1542 +  // NeedToDecodeAudio() takes into account the amount of decoded audio
   1.1543 +  // that's been written to the AudioStream but not yet played. So if we
   1.1544 +  // called NeedToDecodeAudio() twice and the thread was switched out
   1.1545 +  // between the calls, audio could play in the meantime, which could
   1.1546 +  // change the return value of NeedToDecodeAudio() and give inconsistent
   1.1547 +  // results. So we cache the value returned by NeedToDecodeAudio(), and
   1.1548 +  // make decisions based on the cached value. If NeedToDecodeAudio() has
   1.1549 +  // returned false, and then subsequently returns true while we're not
   1.1550 +  // playing, it will probably be OK since we don't need to consume data
   1.1551 +  // anyway.
  1.1552 +
  1.1553 +  const bool needToDecodeAudio = NeedToDecodeAudio();
  1.1554 +  const bool needToDecodeVideo = NeedToDecodeVideo();
  1.1555 +
  1.1556 +  // If we're in completed state, we should not need to decode anything else.
  1.1557 +  MOZ_ASSERT(mState != DECODER_STATE_COMPLETED ||
  1.1558 +             (!needToDecodeAudio && !needToDecodeVideo));
  1.1559 +
  1.1560 +  bool needIdle = !mDecoder->IsLogicallyPlaying() &&
  1.1561 +                  mState != DECODER_STATE_SEEKING &&
  1.1562 +                  !needToDecodeAudio &&
  1.1563 +                  !needToDecodeVideo &&
  1.1564 +                  !IsPlaying();
  1.1565 +
  1.1566 +  if (needToDecodeAudio) {
  1.1567 +    EnsureAudioDecodeTaskQueued();
  1.1568 +  }
  1.1569 +  if (needToDecodeVideo) {
  1.1570 +    EnsureVideoDecodeTaskQueued();
  1.1571 +  }
  1.1572 +
  1.1573 +  if (mIsReaderIdle == needIdle) {
  1.1574 +    return;
  1.1575 +  }
  1.1576 +  mIsReaderIdle = needIdle;
  1.1577 +  RefPtr<nsIRunnable> event;
  1.1578 +  if (mIsReaderIdle) {
  1.1579 +    event = NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SetReaderIdle);
  1.1580 +  } else {
  1.1581 +    event = NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SetReaderActive);
  1.1582 +  }
  1.1583 +  if (NS_FAILED(mDecodeTaskQueue->Dispatch(event.forget())) &&
  1.1584 +      mState != DECODER_STATE_SHUTDOWN) {
  1.1585 +    NS_WARNING("Failed to dispatch event to set decoder idle state");
  1.1586 +  }
  1.1587 +}
  1.1588 +
  1.1589 +nsresult
  1.1590 +MediaDecoderStateMachine::EnqueueDecodeSeekTask()
  1.1591 +{
  1.1592 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1.1593 +               "Should be on state machine or decode thread.");
  1.1594 +  AssertCurrentThreadInMonitor();
  1.1595 +
  1.1596 +  if (mState != DECODER_STATE_SEEKING) {
  1.1597 +    return NS_OK;
  1.1598 +  }
  1.1599 +  nsresult rv = mDecodeTaskQueue->Dispatch(
  1.1600 +    NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeSeek));
  1.1601 +  NS_ENSURE_SUCCESS(rv, rv);
  1.1602 +
  1.1603 +  return NS_OK;
  1.1604 +}
  1.1605 +
  1.1606 +nsresult
  1.1607 +MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded()
  1.1608 +{
  1.1609 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1610 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1.1611 +               "Should be on state machine or decode thread.");
  1.1612 +
  1.1613 +  if (NeedToDecodeAudio()) {
  1.1614 +    return EnsureAudioDecodeTaskQueued();
  1.1615 +  }
  1.1616 +
  1.1617 +  return NS_OK;
  1.1618 +}
  1.1619 +
  1.1620 +nsresult
  1.1621 +MediaDecoderStateMachine::EnsureAudioDecodeTaskQueued()
  1.1622 +{
  1.1623 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1624 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1.1625 +               "Should be on state machine or decode thread.");
  1.1626 +
  1.1627 +  if (mState >= DECODER_STATE_COMPLETED) {
  1.1628 +    return NS_OK;
  1.1629 +  }
  1.1630 +
  1.1631 +  MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA);
  1.1632 +
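          +  // Only dispatch a new audio decode task if we're still decoding audio and
          +  // no audio decode task is already in flight.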
  1.1633 +  if (mIsAudioDecoding && !mDispatchedAudioDecodeTask) {
  1.1634 +    nsresult rv = mDecodeTaskQueue->Dispatch(
  1.1635 +      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeAudio));
  1.1636 +    if (NS_SUCCEEDED(rv)) {
  1.1637 +      mDispatchedAudioDecodeTask = true;
  1.1638 +    } else {
  1.1639 +      NS_WARNING("Failed to dispatch task to decode audio");
  1.1640 +    }
  1.1641 +  }
  1.1642 +
  1.1643 +  return NS_OK;
  1.1644 +}
  1.1645 +
  1.1646 +nsresult
  1.1647 +MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded()
  1.1648 +{
  1.1649 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1650 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1.1651 +               "Should be on state machine or decode thread.");
  1.1652 +
  1.1653 +  if (NeedToDecodeVideo()) {
  1.1654 +    return EnsureVideoDecodeTaskQueued();
  1.1655 +  }
  1.1656 +
  1.1657 +  return NS_OK;
  1.1658 +}
  1.1659 +
  1.1660 +nsresult
  1.1661 +MediaDecoderStateMachine::EnsureVideoDecodeTaskQueued()
  1.1662 +{
  1.1663 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1664 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1.1665 +               "Should be on state machine or decode thread.");
  1.1666 +
  1.1667 +  if (mState >= DECODER_STATE_COMPLETED) {
  1.1668 +    return NS_OK;
  1.1669 +  }
  1.1670 +
  1.1671 +  MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA);
  1.1672 +
  1.1673 +  if (mIsVideoDecoding && !mDispatchedVideoDecodeTask) {
  1.1674 +    nsresult rv = mDecodeTaskQueue->Dispatch(
  1.1675 +      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeVideo));
  1.1676 +    if (NS_SUCCEEDED(rv)) {
  1.1677 +      mDispatchedVideoDecodeTask = true;
  1.1678 +    } else {
  1.1679 +      NS_WARNING("Failed to dispatch task to decode video");
  1.1680 +    }
  1.1681 +  }
  1.1682 +
  1.1683 +  return NS_OK;
  1.1684 +}
  1.1685 +
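          +// Creates the dedicated "Media Audio" thread, if it doesn't already exist,
          +// and kicks off AudioLoop() on it. No audio thread is used while audio is
          +// being captured (mAudioCaptured).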
  1.1686 +nsresult
  1.1687 +MediaDecoderStateMachine::StartAudioThread()
  1.1688 +{
  1.1689 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1.1690 +               "Should be on state machine or decode thread.");
  1.1691 +  AssertCurrentThreadInMonitor();
  1.1692 +  if (mAudioCaptured) {
  1.1693 +    NS_ASSERTION(mStopAudioThread, "mStopAudioThread must always be true if audio is captured");
  1.1694 +    return NS_OK;
  1.1695 +  }
  1.1696 +
  1.1697 +  mStopAudioThread = false;
  1.1698 +  if (HasAudio() && !mAudioThread) {
  1.1699 +    nsresult rv = NS_NewNamedThread("Media Audio",
  1.1700 +                                    getter_AddRefs(mAudioThread),
  1.1701 +                                    nullptr,
  1.1702 +                                    MEDIA_THREAD_STACK_SIZE);
  1.1703 +    if (NS_FAILED(rv)) {
  1.1704 +      DECODER_LOG(PR_LOG_WARNING, "Changed state to SHUTDOWN because failed to create audio thread");
  1.1705 +      mState = DECODER_STATE_SHUTDOWN;
  1.1706 +      return rv;
  1.1707 +    }
  1.1708 +
  1.1709 +    nsCOMPtr<nsIRunnable> event =
  1.1710 +      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::AudioLoop);
  1.1711 +    mAudioThread->Dispatch(event, NS_DISPATCH_NORMAL);
  1.1712 +  }
  1.1713 +  return NS_OK;
  1.1714 +}
  1.1715 +
  1.1716 +int64_t MediaDecoderStateMachine::AudioDecodedUsecs()
  1.1717 +{
  1.1718 +  NS_ASSERTION(HasAudio(),
  1.1719 +               "Should only call AudioDecodedUsecs() when we have audio");
   1.1720 +  // The amount of audio we have decoded is the amount of audio data that's
   1.1721 +  // been pushed to the hardware but not yet played, plus the amount of
   1.1722 +  // audio data still waiting in the queue to be pushed to the hardware.
  1.1723 +  int64_t pushed = (mAudioEndTime != -1) ? (mAudioEndTime - GetMediaTime()) : 0;
  1.1724 +  return pushed + AudioQueue().Duration();
  1.1725 +}
  1.1726 +
  1.1727 +bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs)
  1.1728 +{
  1.1729 +  AssertCurrentThreadInMonitor();
  1.1730 +  // We consider ourselves low on decoded data if we're low on audio,
  1.1731 +  // provided we've not decoded to the end of the audio stream, or
  1.1732 +  // if we're low on video frames, provided
  1.1733 +  // we've not decoded to the end of the video stream.
  1.1734 +  return ((HasAudio() &&
  1.1735 +           !AudioQueue().IsFinished() &&
  1.1736 +           AudioDecodedUsecs() < aAudioUsecs)
  1.1737 +          ||
  1.1738 +         (HasVideo() &&
  1.1739 +          !VideoQueue().IsFinished() &&
  1.1740 +          static_cast<uint32_t>(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES));
  1.1741 +}
  1.1742 +
  1.1743 +bool MediaDecoderStateMachine::HasLowUndecodedData()
  1.1744 +{
  1.1745 +  return HasLowUndecodedData(mLowDataThresholdUsecs);
  1.1746 +}
  1.1747 +
  1.1748 +bool MediaDecoderStateMachine::HasLowUndecodedData(double aUsecs)
  1.1749 +{
  1.1750 +  AssertCurrentThreadInMonitor();
  1.1751 +  NS_ASSERTION(mState > DECODER_STATE_DECODING_METADATA,
  1.1752 +               "Must have loaded metadata for GetBuffered() to work");
  1.1753 +
  1.1754 +  bool reliable;
  1.1755 +  double bytesPerSecond = mDecoder->ComputePlaybackRate(&reliable);
  1.1756 +  if (!reliable) {
  1.1757 +    // Default to assuming we have enough
  1.1758 +    return false;
  1.1759 +  }
  1.1760 +
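          +  // Convert the requested playback window into a byte requirement using the
          +  // estimated playback rate, then check whether that much data is cached.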
  1.1761 +  MediaResource* stream = mDecoder->GetResource();
  1.1762 +  int64_t currentPos = stream->Tell();
  1.1763 +  int64_t requiredPos = currentPos + int64_t((aUsecs/1000000.0)*bytesPerSecond);
  1.1764 +  int64_t length = stream->GetLength();
  1.1765 +  if (length >= 0) {
  1.1766 +    requiredPos = std::min(requiredPos, length);
  1.1767 +  }
  1.1768 +
  1.1769 +  return stream->GetCachedDataEnd(currentPos) < requiredPos;
  1.1770 +}
  1.1771 +
  1.1772 +void
  1.1773 +MediaDecoderStateMachine::DecodeError()
  1.1774 +{
  1.1775 +  AssertCurrentThreadInMonitor();
  1.1776 +  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  1.1777 +
  1.1778 +  // Change state to shutdown before sending error report to MediaDecoder
  1.1779 +  // and the HTMLMediaElement, so that our pipeline can start exiting
  1.1780 +  // cleanly during the sync dispatch below.
  1.1781 +  DECODER_LOG(PR_LOG_WARNING, "Decode error, changed state to SHUTDOWN");
  1.1782 +  ScheduleStateMachine();
  1.1783 +  mState = DECODER_STATE_SHUTDOWN;
  1.1784 +  mDecoder->GetReentrantMonitor().NotifyAll();
  1.1785 +
  1.1786 +  // Dispatch the event to call DecodeError synchronously. This ensures
  1.1787 +  // we're in shutdown state by the time we exit the decode thread.
  1.1788 +  // If we just moved to shutdown state here on the decode thread, we may
  1.1789 +  // cause the state machine to shutdown/free memory without closing its
  1.1790 +  // media stream properly, and we'll get callbacks from the media stream
  1.1791 +  // causing a crash.
   1.1792 +  {
  1.1793 +    nsCOMPtr<nsIRunnable> event =
  1.1794 +      NS_NewRunnableMethod(mDecoder, &MediaDecoder::DecodeError);
  1.1795 +    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.1796 +    NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
  1.1797 +  }
  1.1798 +}
  1.1799 +
  1.1800 +void
  1.1801 +MediaDecoderStateMachine::CallDecodeMetadata()
  1.1802 +{
  1.1803 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1804 +  if (mState != DECODER_STATE_DECODING_METADATA) {
  1.1805 +    return;
  1.1806 +  }
  1.1807 +  if (NS_FAILED(DecodeMetadata())) {
  1.1808 +    DECODER_LOG(PR_LOG_WARNING, "Decode metadata failed, shutting down decoder");
  1.1809 +    DecodeError();
  1.1810 +  }
  1.1811 +}
  1.1812 +
  1.1813 +nsresult MediaDecoderStateMachine::DecodeMetadata()
  1.1814 +{
  1.1815 +  AssertCurrentThreadInMonitor();
  1.1816 +  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  1.1817 +  DECODER_LOG(PR_LOG_DEBUG, "Decoding Media Headers");
  1.1818 +  if (mState != DECODER_STATE_DECODING_METADATA) {
  1.1819 +    return NS_ERROR_FAILURE;
  1.1820 +  }
  1.1821 +  EnsureActive();
  1.1822 +
  1.1823 +  nsresult res;
  1.1824 +  MediaInfo info;
  1.1825 +  MetadataTags* tags;
  1.1826 +  {
  1.1827 +    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.1828 +    res = mReader->ReadMetadata(&info, &tags);
  1.1829 +  }
  1.1830 +  if (NS_SUCCEEDED(res) &&
  1.1831 +      mState == DECODER_STATE_DECODING_METADATA &&
  1.1832 +      mReader->IsWaitingMediaResources()) {
  1.1833 +    // change state to DECODER_STATE_WAIT_FOR_RESOURCES
  1.1834 +    StartWaitForResources();
  1.1835 +    return NS_OK;
  1.1836 +  }
  1.1837 +
  1.1838 +  mInfo = info;
  1.1839 +
  1.1840 +  if (NS_FAILED(res) || (!info.HasValidMedia())) {
  1.1841 +    return NS_ERROR_FAILURE;
  1.1842 +  }
  1.1843 +  mDecoder->StartProgressUpdates();
  1.1844 +  mGotDurationFromMetaData = (GetDuration() != -1);
  1.1845 +
  1.1846 +  VideoData* videoData = FindStartTime();
  1.1847 +  if (videoData) {
  1.1848 +    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.1849 +    RenderVideoFrame(videoData, TimeStamp::Now());
  1.1850 +  }
  1.1851 +
  1.1852 +  if (mState == DECODER_STATE_SHUTDOWN) {
  1.1853 +    return NS_ERROR_FAILURE;
  1.1854 +  }
  1.1855 +
  1.1856 +  NS_ASSERTION(mStartTime != -1, "Must have start time");
  1.1857 +  MOZ_ASSERT((!HasVideo() && !HasAudio()) ||
  1.1858 +              !(mMediaSeekable && mTransportSeekable) || mEndTime != -1,
  1.1859 +              "Active seekable media should have end time");
  1.1860 +  MOZ_ASSERT(!(mMediaSeekable && mTransportSeekable) ||
  1.1861 +             GetDuration() != -1, "Seekable media should have duration");
  1.1862 +  DECODER_LOG(PR_LOG_DEBUG, "Media goes from %lld to %lld (duration %lld) "
  1.1863 +              "transportSeekable=%d, mediaSeekable=%d",
  1.1864 +              mStartTime, mEndTime, GetDuration(), mTransportSeekable, mMediaSeekable);
  1.1865 +
  1.1866 +  if (HasAudio() && !HasVideo()) {
  1.1867 +    // We're playing audio only. We don't need to worry about slow video
  1.1868 +    // decodes causing audio underruns, so don't buffer so much audio in
  1.1869 +    // order to reduce memory usage.
  1.1870 +    mAmpleAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
  1.1871 +    mLowAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
  1.1872 +  }
  1.1873 +
  1.1874 +  // Inform the element that we've loaded the metadata and the first frame.
  1.1875 +  nsCOMPtr<nsIRunnable> metadataLoadedEvent =
  1.1876 +    new AudioMetadataEventRunner(mDecoder,
  1.1877 +                                 mInfo.mAudio.mChannels,
  1.1878 +                                 mInfo.mAudio.mRate,
  1.1879 +                                 HasAudio(),
  1.1880 +                                 HasVideo(),
  1.1881 +                                 tags);
  1.1882 +  NS_DispatchToMainThread(metadataLoadedEvent, NS_DISPATCH_NORMAL);
  1.1883 +
  1.1884 +  if (HasAudio()) {
  1.1885 +    RefPtr<nsIRunnable> decodeTask(
  1.1886 +      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded));
  1.1887 +    AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
  1.1888 +  }
  1.1889 +  if (HasVideo()) {
  1.1890 +    RefPtr<nsIRunnable> decodeTask(
  1.1891 +      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded));
  1.1892 +    VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
  1.1893 +  }
  1.1894 +
  1.1895 +  if (mState == DECODER_STATE_DECODING_METADATA) {
  1.1896 +    DECODER_LOG(PR_LOG_DEBUG, "Changed state from DECODING_METADATA to DECODING");
  1.1897 +    StartDecoding();
  1.1898 +  }
  1.1899 +
  1.1900 +  // For very short media FindStartTime() can decode the entire media.
  1.1901 +  // So we need to check if this has occurred, else our decode pipeline won't
  1.1902 +  // run (since it doesn't need to) and we won't detect end of stream.
  1.1903 +  CheckIfDecodeComplete();
  1.1904 +
  1.1905 +  if ((mState == DECODER_STATE_DECODING || mState == DECODER_STATE_COMPLETED) &&
  1.1906 +      mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
  1.1907 +      !IsPlaying())
  1.1908 +  {
  1.1909 +    StartPlayback();
  1.1910 +  }
  1.1911 +
  1.1912 +  return NS_OK;
  1.1913 +}
  1.1914 +
  1.1915 +void MediaDecoderStateMachine::DecodeSeek()
  1.1916 +{
  1.1917 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.1918 +  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  1.1919 +  if (mState != DECODER_STATE_SEEKING) {
  1.1920 +    return;
  1.1921 +  }
  1.1922 +  EnsureActive();
  1.1923 +
   1.1924 +  // During the seek, we must not hold the decoder state lock; otherwise
   1.1925 +  // long seek operations can block the main thread.
   1.1926 +  // The events dispatched to the main thread are SYNC calls.
   1.1927 +  // These calls are made outside of the decode monitor lock, so
   1.1928 +  // it is safe for the main thread to make calls that acquire
   1.1929 +  // the lock, since it won't deadlock. We check the state when
   1.1930 +  // acquiring the lock again in case shutdown has occurred
   1.1931 +  // during the time when we didn't have the lock.
  1.1932 +  int64_t seekTime = mSeekTarget.mTime;
  1.1933 +  mDecoder->StopProgressUpdates();
  1.1934 +
  1.1935 +  bool currentTimeChanged = false;
  1.1936 +  const int64_t mediaTime = GetMediaTime();
  1.1937 +  if (mediaTime != seekTime) {
  1.1938 +    currentTimeChanged = true;
  1.1939 +    // Stop playback now to ensure that while we're outside the monitor
  1.1940 +    // dispatching SeekingStarted, playback doesn't advance and mess with
   1.1941 +    // mCurrentFrameTime, which we're setting to seekTime here.
  1.1942 +    StopPlayback();
  1.1943 +    UpdatePlaybackPositionInternal(seekTime);
  1.1944 +  }
  1.1945 +
   1.1946 +  // SeekingStarted will call UpdateReadyStateForData, which will
   1.1947 +  // inform the element and its users that we have no frames
   1.1948 +  // to display.
  1.1949 +  {
  1.1950 +    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.1951 +    nsCOMPtr<nsIRunnable> startEvent =
  1.1952 +      NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStarted);
  1.1953 +    NS_DispatchToMainThread(startEvent, NS_DISPATCH_SYNC);
  1.1954 +  }
  1.1955 +
  1.1956 +  int64_t newCurrentTime = seekTime;
  1.1957 +  if (currentTimeChanged) {
   1.1958 +    // The seek target is different from the current playback position, so
   1.1959 +    // we'll need to seek the playback position; shut down our decode
   1.1960 +    // and audio threads.
  1.1961 +    StopAudioThread();
  1.1962 +    ResetPlayback();
  1.1963 +    nsresult res;
  1.1964 +    {
  1.1965 +      ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.1966 +      // Now perform the seek. We must not hold the state machine monitor
   1.1967 +      // while we seek, since the seek reads data, which could block on I/O.
  1.1968 +      res = mReader->Seek(seekTime,
  1.1969 +                          mStartTime,
  1.1970 +                          mEndTime,
  1.1971 +                          mediaTime);
  1.1972 +
  1.1973 +      if (NS_SUCCEEDED(res) && mSeekTarget.mType == SeekTarget::Accurate) {
  1.1974 +        res = mReader->DecodeToTarget(seekTime);
  1.1975 +      }
  1.1976 +    }
  1.1977 +
  1.1978 +    if (NS_SUCCEEDED(res)) {
  1.1979 +      int64_t nextSampleStartTime = 0;
  1.1980 +      VideoData* video = nullptr;
  1.1981 +      {
  1.1982 +        ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.1983 +        video = mReader->FindStartTime(nextSampleStartTime);
  1.1984 +      }
  1.1985 +
  1.1986 +      // Setup timestamp state.
  1.1987 +      if (seekTime == mEndTime) {
  1.1988 +        newCurrentTime = mAudioStartTime = seekTime;
  1.1989 +      } else if (HasAudio()) {
  1.1990 +        AudioData* audio = AudioQueue().PeekFront();
  1.1991 +        newCurrentTime = mAudioStartTime = audio ? audio->mTime : seekTime;
  1.1992 +      } else {
  1.1993 +        newCurrentTime = video ? video->mTime : seekTime;
  1.1994 +      }
  1.1995 +      mPlayDuration = newCurrentTime - mStartTime;
  1.1996 +
  1.1997 +      if (HasVideo()) {
  1.1998 +        if (video) {
  1.1999 +          {
  1.2000 +            ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.2001 +            RenderVideoFrame(video, TimeStamp::Now());
  1.2002 +          }
  1.2003 +          nsCOMPtr<nsIRunnable> event =
  1.2004 +            NS_NewRunnableMethod(mDecoder, &MediaDecoder::Invalidate);
  1.2005 +          NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
  1.2006 +        }
  1.2007 +      }
  1.2008 +    } else {
  1.2009 +      DecodeError();
  1.2010 +    }
  1.2011 +  }
  1.2012 +  mDecoder->StartProgressUpdates();
  1.2013 +  if (mState == DECODER_STATE_DECODING_METADATA ||
  1.2014 +      mState == DECODER_STATE_DORMANT ||
  1.2015 +      mState == DECODER_STATE_SHUTDOWN) {
  1.2016 +    return;
  1.2017 +  }
  1.2018 +
  1.2019 +  // Change state to DECODING or COMPLETED now. SeekingStopped will
  1.2020 +  // call MediaDecoderStateMachine::Seek to reset our state to SEEKING
  1.2021 +  // if we need to seek again.
  1.2022 +
  1.2023 +  nsCOMPtr<nsIRunnable> stopEvent;
  1.2024 +  bool isLiveStream = mDecoder->GetResource()->GetLength() == -1;
  1.2025 +  if (GetMediaTime() == mEndTime && !isLiveStream) {
  1.2026 +    // Seeked to end of media, move to COMPLETED state. Note we don't do
  1.2027 +    // this if we're playing a live stream, since the end of media will advance
  1.2028 +    // once we download more data!
  1.2029 +    DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to COMPLETED", seekTime);
  1.2030 +    stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStoppedAtEnd);
  1.2031 +    // Explicitly set our state so we don't decode further, and so
  1.2032 +    // we report playback ended to the media element.
  1.2033 +    mState = DECODER_STATE_COMPLETED;
  1.2034 +    mIsAudioDecoding = false;
  1.2035 +    mIsVideoDecoding = false;
  1.2036 +    DispatchDecodeTasksIfNeeded();
  1.2037 +  } else {
  1.2038 +    DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to DECODING", seekTime);
  1.2039 +    stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStopped);
  1.2040 +    StartDecoding();
  1.2041 +  }
  1.2042 +
  1.2043 +  if (newCurrentTime != mediaTime) {
  1.2044 +    UpdatePlaybackPositionInternal(newCurrentTime);
  1.2045 +    if (mDecoder->GetDecodedStream()) {
  1.2046 +      SetSyncPointForMediaStream();
  1.2047 +    }
  1.2048 +  }
  1.2049 +
  1.2050 +  // Try to decode another frame to detect if we're at the end...
  1.2051 +  DECODER_LOG(PR_LOG_DEBUG, "Seek completed, mCurrentFrameTime=%lld", mCurrentFrameTime);
  1.2052 +
  1.2053 +  {
  1.2054 +    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.2055 +    NS_DispatchToMainThread(stopEvent, NS_DISPATCH_SYNC);
  1.2056 +  }
  1.2057 +
  1.2058 +  // Reset quick buffering status. This ensures that if we began the
  1.2059 +  // seek while quick-buffering, we won't bypass quick buffering mode
  1.2060 +  // if we need to buffer after the seek.
  1.2061 +  mQuickBuffering = false;
  1.2062 +
  1.2063 +  ScheduleStateMachine();
  1.2064 +}
  1.2065 +
  1.2066 +// Runnable to dispose of the decoder and state machine on the main thread.
  1.2067 +class nsDecoderDisposeEvent : public nsRunnable {
  1.2068 +public:
  1.2069 +  nsDecoderDisposeEvent(already_AddRefed<MediaDecoder> aDecoder,
  1.2070 +                        already_AddRefed<MediaDecoderStateMachine> aStateMachine)
  1.2071 +    : mDecoder(aDecoder), mStateMachine(aStateMachine) {}
  1.2072 +  NS_IMETHOD Run() {
  1.2073 +    NS_ASSERTION(NS_IsMainThread(), "Must be on main thread.");
  1.2074 +    mStateMachine->ReleaseDecoder();
  1.2075 +    mDecoder->ReleaseStateMachine();
  1.2076 +    mStateMachine = nullptr;
  1.2077 +    mDecoder = nullptr;
  1.2078 +    return NS_OK;
  1.2079 +  }
  1.2080 +private:
  1.2081 +  nsRefPtr<MediaDecoder> mDecoder;
  1.2082 +  nsRefPtr<MediaDecoderStateMachine> mStateMachine;
  1.2083 +};
  1.2084 +
  1.2085 +// Runnable which dispatches an event to the main thread to dispose of the
  1.2086 +// decoder and state machine. This runs on the state machine thread after
  1.2087 +// the state machine has shutdown, and all events for that state machine have
  1.2088 +// finished running.
  1.2089 +class nsDispatchDisposeEvent : public nsRunnable {
  1.2090 +public:
  1.2091 +  nsDispatchDisposeEvent(MediaDecoder* aDecoder,
  1.2092 +                         MediaDecoderStateMachine* aStateMachine)
  1.2093 +    : mDecoder(aDecoder), mStateMachine(aStateMachine) {}
  1.2094 +  NS_IMETHOD Run() {
  1.2095 +    NS_DispatchToMainThread(new nsDecoderDisposeEvent(mDecoder.forget(),
  1.2096 +                                                      mStateMachine.forget()));
  1.2097 +    return NS_OK;
  1.2098 +  }
  1.2099 +private:
  1.2100 +  nsRefPtr<MediaDecoder> mDecoder;
  1.2101 +  nsRefPtr<MediaDecoderStateMachine> mStateMachine;
  1.2102 +};
  1.2103 +
  1.2104 +nsresult MediaDecoderStateMachine::RunStateMachine()
  1.2105 +{
  1.2106 +  AssertCurrentThreadInMonitor();
  1.2107 +
  1.2108 +  MediaResource* resource = mDecoder->GetResource();
  1.2109 +  NS_ENSURE_TRUE(resource, NS_ERROR_NULL_POINTER);
  1.2110 +
  1.2111 +  switch (mState) {
  1.2112 +    case DECODER_STATE_SHUTDOWN: {
  1.2113 +      if (IsPlaying()) {
  1.2114 +        StopPlayback();
  1.2115 +      }
  1.2116 +      StopAudioThread();
  1.2117 +      // If mAudioThread is non-null after StopAudioThread completes, we are
  1.2118 +      // running in a nested event loop waiting for Shutdown() on
  1.2119 +      // mAudioThread to complete.  Return to the event loop and let it
  1.2120 +      // finish processing before continuing with shutdown.
  1.2121 +      if (mAudioThread) {
  1.2122 +        MOZ_ASSERT(mStopAudioThread);
  1.2123 +        return NS_OK;
  1.2124 +      }
  1.2125 +
  1.2126 +      // The reader's listeners hold references to the state machine,
  1.2127 +      // creating a cycle which keeps the state machine and its shared
  1.2128 +      // thread pools alive. So break it here.
  1.2129 +      AudioQueue().ClearListeners();
  1.2130 +      VideoQueue().ClearListeners();
  1.2131 +
  1.2132 +      {
  1.2133 +        ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.2134 +        // Wait for the thread decoding to exit.
  1.2135 +        mDecodeTaskQueue->Shutdown();
  1.2136 +        mDecodeTaskQueue = nullptr;
  1.2137 +        mReader->ReleaseMediaResources();
  1.2138 +      }
  1.2139 +      // Now that those threads are stopped, there's no possibility of
  1.2140 +      // mPendingWakeDecoder being needed again. Revoke it.
  1.2141 +      mPendingWakeDecoder = nullptr;
  1.2142 +
  1.2143 +      MOZ_ASSERT(mState == DECODER_STATE_SHUTDOWN,
  1.2144 +                 "How did we escape from the shutdown state?");
  1.2145 +      // We must daisy-chain these events to destroy the decoder. We must
  1.2146 +      // destroy the decoder on the main thread, but we can't destroy the
  1.2147 +      // decoder while this thread holds the decoder monitor. We can't
  1.2148 +      // dispatch an event to the main thread to destroy the decoder from
  1.2149 +      // here, as the event may run before the dispatch returns, and we
  1.2150 +      // hold the decoder monitor here. We also want to guarantee that the
  1.2151 +      // state machine is destroyed on the main thread, and so the
  1.2152 +      // event runner running this function (which holds a reference to the
  1.2153 +      // state machine) needs to finish and be released in order to allow
  1.2154 +      // that. So we dispatch an event to run after this event runner has
  1.2155 +      // finished and released its monitor/references. That event then will
  1.2156 +      // dispatch an event to the main thread to release the decoder and
  1.2157 +      // state machine.
  1.2158 +      GetStateMachineThread()->Dispatch(
  1.2159 +        new nsDispatchDisposeEvent(mDecoder, this), NS_DISPATCH_NORMAL);
  1.2160 +
  1.2161 +      mTimer->Cancel();
  1.2162 +      mTimer = nullptr;
  1.2163 +      return NS_OK;
  1.2164 +    }
  1.2165 +
  1.2166 +    case DECODER_STATE_DORMANT: {
  1.2167 +      if (IsPlaying()) {
  1.2168 +        StopPlayback();
  1.2169 +      }
  1.2170 +      StopAudioThread();
  1.2171 +      // Now that those threads are stopped, there's no possibility of
  1.2172 +      // mPendingWakeDecoder being needed again. Revoke it.
  1.2173 +      mPendingWakeDecoder = nullptr;
  1.2174 +      {
  1.2175 +        ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.2176 +        // Wait for the thread decoding, if any, to exit.
  1.2177 +        mDecodeTaskQueue->AwaitIdle();
  1.2178 +        mReader->ReleaseMediaResources();
  1.2179 +      }
  1.2180 +      return NS_OK;
  1.2181 +    }
  1.2182 +
  1.2183 +    case DECODER_STATE_WAIT_FOR_RESOURCES: {
  1.2184 +      return NS_OK;
  1.2185 +    }
  1.2186 +
  1.2187 +    case DECODER_STATE_DECODING_METADATA: {
  1.2188 +      // Ensure we have a decode thread to decode metadata.
  1.2189 +      return EnqueueDecodeMetadataTask();
  1.2190 +    }
  1.2191 +
  1.2192 +    case DECODER_STATE_DECODING: {
  1.2193 +      if (mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING &&
  1.2194 +          IsPlaying())
  1.2195 +      {
  1.2196 +        // We're playing, but the element/decoder is in paused state. Stop
  1.2197 +        // playing!
  1.2198 +        StopPlayback();
  1.2199 +      }
  1.2200 +
  1.2201 +      if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
  1.2202 +          !IsPlaying()) {
   1.2203 +        // The decoder is in the PLAYING state, but the state machine hasn't
   1.2204 +        // started playback yet. Start it now so the clock can be properly queried.
  1.2205 +        StartPlayback();
  1.2206 +      }
  1.2207 +
  1.2208 +      AdvanceFrame();
  1.2209 +      NS_ASSERTION(mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING ||
  1.2210 +                   IsStateMachineScheduled() ||
  1.2211 +                   mPlaybackRate == 0.0, "Must have timer scheduled");
  1.2212 +      return NS_OK;
  1.2213 +    }
  1.2214 +
  1.2215 +    case DECODER_STATE_BUFFERING: {
  1.2216 +      TimeStamp now = TimeStamp::Now();
  1.2217 +      NS_ASSERTION(!mBufferingStart.IsNull(), "Must know buffering start time.");
  1.2218 +
  1.2219 +      // We will remain in the buffering state if we've not decoded enough
  1.2220 +      // data to begin playback, or if we've not downloaded a reasonable
  1.2221 +      // amount of data inside our buffering time.
  1.2222 +      TimeDuration elapsed = now - mBufferingStart;
  1.2223 +      bool isLiveStream = resource->GetLength() == -1;
  1.2224 +      if ((isLiveStream || !mDecoder->CanPlayThrough()) &&
  1.2225 +            elapsed < TimeDuration::FromSeconds(mBufferingWait * mPlaybackRate) &&
  1.2226 +            (mQuickBuffering ? HasLowDecodedData(QUICK_BUFFERING_LOW_DATA_USECS)
  1.2227 +                            : HasLowUndecodedData(mBufferingWait * USECS_PER_S)) &&
  1.2228 +            !mDecoder->IsDataCachedToEndOfResource() &&
  1.2229 +            !resource->IsSuspended())
  1.2230 +      {
  1.2231 +        DECODER_LOG(PR_LOG_DEBUG, "Buffering: wait %ds, timeout in %.3lfs %s",
  1.2232 +                    mBufferingWait, mBufferingWait - elapsed.ToSeconds(),
  1.2233 +                    (mQuickBuffering ? "(quick exit)" : ""));
  1.2234 +        ScheduleStateMachine(USECS_PER_S);
  1.2235 +        return NS_OK;
  1.2236 +      } else {
  1.2237 +        DECODER_LOG(PR_LOG_DEBUG, "Changed state from BUFFERING to DECODING");
  1.2238 +        DECODER_LOG(PR_LOG_DEBUG, "Buffered for %.3lfs", (now - mBufferingStart).ToSeconds());
  1.2239 +        StartDecoding();
  1.2240 +      }
  1.2241 +
  1.2242 +      // Notify to allow blocked decoder thread to continue
  1.2243 +      mDecoder->GetReentrantMonitor().NotifyAll();
  1.2244 +      UpdateReadyState();
  1.2245 +      if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
  1.2246 +          !IsPlaying())
  1.2247 +      {
  1.2248 +        StartPlayback();
  1.2249 +      }
  1.2250 +      NS_ASSERTION(IsStateMachineScheduled(), "Must have timer scheduled");
  1.2251 +      return NS_OK;
  1.2252 +    }
  1.2253 +
  1.2254 +    case DECODER_STATE_SEEKING: {
  1.2255 +      // Ensure we have a decode thread to perform the seek.
   1.2256 +      return EnqueueDecodeSeekTask();
  1.2257 +    }
  1.2258 +
  1.2259 +    case DECODER_STATE_COMPLETED: {
  1.2260 +      // Play the remaining media. We want to run AdvanceFrame() at least
  1.2261 +      // once to ensure the current playback position is advanced to the
  1.2262 +      // end of the media, and so that we update the readyState.
  1.2263 +      if (VideoQueue().GetSize() > 0 ||
  1.2264 +          (HasAudio() && !mAudioCompleted) ||
  1.2265 +          (mDecoder->GetDecodedStream() && !mDecoder->GetDecodedStream()->IsFinished()))
  1.2266 +      {
  1.2267 +        AdvanceFrame();
  1.2268 +        NS_ASSERTION(mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING ||
  1.2269 +                     mPlaybackRate == 0 ||
  1.2270 +                     IsStateMachineScheduled(),
  1.2271 +                     "Must have timer scheduled");
  1.2272 +        return NS_OK;
  1.2273 +      }
  1.2274 +
  1.2275 +      // StopPlayback in order to reset the IsPlaying() state so audio
  1.2276 +      // is restarted correctly.
  1.2277 +      StopPlayback();
  1.2278 +
  1.2279 +      if (mState != DECODER_STATE_COMPLETED) {
  1.2280 +        // While we're presenting a frame we can change state. Whatever changed
  1.2281 +        // our state should have scheduled another state machine run.
  1.2282 +        NS_ASSERTION(IsStateMachineScheduled(), "Must have timer scheduled");
  1.2283 +        return NS_OK;
  1.2284 +      }
  1.2285 +
  1.2286 +      StopAudioThread();
  1.2287 +      // When we're decoding to a stream, the stream's main-thread finish signal
  1.2288 +      // will take care of calling MediaDecoder::PlaybackEnded.
  1.2289 +      if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
  1.2290 +          !mDecoder->GetDecodedStream()) {
  1.2291 +        int64_t videoTime = HasVideo() ? mVideoFrameEndTime : 0;
  1.2292 +        int64_t clockTime = std::max(mEndTime, std::max(videoTime, GetAudioClock()));
  1.2293 +        UpdatePlaybackPosition(clockTime);
  1.2294 +
  1.2295 +        {
   1.2296 +          // Wait until the state change has completed on the main thread;
   1.2297 +          // otherwise we might see |mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING|
   1.2298 +          // in the next loop and send |MediaDecoder::PlaybackEnded| again, firing the
   1.2299 +          // 'ended' event twice in the media element.
  1.2300 +          ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.2301 +          nsCOMPtr<nsIRunnable> event =
  1.2302 +            NS_NewRunnableMethod(mDecoder, &MediaDecoder::PlaybackEnded);
  1.2303 +          NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
  1.2304 +        }
  1.2305 +      }
  1.2306 +      return NS_OK;
  1.2307 +    }
  1.2308 +  }
  1.2309 +
  1.2310 +  return NS_OK;
  1.2311 +}
  1.2312 +
  1.2313 +void MediaDecoderStateMachine::RenderVideoFrame(VideoData* aData,
  1.2314 +                                                TimeStamp aTarget)
  1.2315 +{
  1.2316 +  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
  1.2317 +               "Should be on state machine or decode thread.");
  1.2318 +  mDecoder->GetReentrantMonitor().AssertNotCurrentThreadIn();
  1.2319 +
  1.2320 +  if (aData->mDuplicate) {
  1.2321 +    return;
  1.2322 +  }
  1.2323 +
  1.2324 +  VERBOSE_LOG("playing video frame %lld", aData->mTime);
  1.2325 +
  1.2326 +  VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
  1.2327 +  if (container) {
  1.2328 +    container->SetCurrentFrame(ThebesIntSize(aData->mDisplay), aData->mImage,
  1.2329 +                               aTarget);
  1.2330 +  }
  1.2331 +}
  1.2332 +
  1.2333 +int64_t
  1.2334 +MediaDecoderStateMachine::GetAudioClock()
  1.2335 +{
  1.2336 +  // We must hold the decoder monitor while using the audio stream off the
  1.2337 +  // audio thread to ensure that it doesn't get destroyed on the audio thread
  1.2338 +  // while we're using it.
  1.2339 +  AssertCurrentThreadInMonitor();
  1.2340 +  if (!HasAudio() || mAudioCaptured)
  1.2341 +    return -1;
  1.2342 +  if (!mAudioStream) {
  1.2343 +    // Audio thread hasn't played any data yet.
  1.2344 +    return mAudioStartTime;
  1.2345 +  }
  1.2346 +  int64_t t = mAudioStream->GetPosition();
  1.2347 +  return (t == -1) ? -1 : t + mAudioStartTime;
  1.2348 +}
  1.2349 +
  1.2350 +int64_t MediaDecoderStateMachine::GetVideoStreamPosition()
  1.2351 +{
  1.2352 +  AssertCurrentThreadInMonitor();
  1.2353 +
  1.2354 +  if (!IsPlaying()) {
  1.2355 +    return mPlayDuration + mStartTime;
  1.2356 +  }
  1.2357 +
   1.2358 +  // The playback rate has just been changed; reset mPlayStartTime.
  1.2359 +  if (mResetPlayStartTime) {
  1.2360 +    mPlayStartTime = TimeStamp::Now();
  1.2361 +    mResetPlayStartTime = false;
  1.2362 +  }
  1.2363 +
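          +  // Derive the position from the wall clock: take the time elapsed since
          +  // playback (re)started plus the accumulated play duration, and scale the
          +  // portion past mBasePosition by the playback rate.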
  1.2364 +  int64_t pos = DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration;
  1.2365 +  pos -= mBasePosition;
   1.2366 +  NS_ASSERTION(pos >= 0, "Video stream position should be non-negative.");
  1.2367 +  return mBasePosition + pos * mPlaybackRate + mStartTime;
  1.2368 +}
  1.2369 +
  1.2370 +int64_t MediaDecoderStateMachine::GetClock()
  1.2371 +{
  1.2372 +  AssertCurrentThreadInMonitor();
  1.2373 +
  1.2374 +  // Determine the clock time. If we've got audio, and we've not reached
  1.2375 +  // the end of the audio, use the audio clock. However if we've finished
  1.2376 +  // audio, or don't have audio, use the system clock. If our output is being
  1.2377 +  // fed to a MediaStream, use that stream as the source of the clock.
  1.2378 +  int64_t clock_time = -1;
  1.2379 +  DecodedStreamData* stream = mDecoder->GetDecodedStream();
  1.2380 +  if (!IsPlaying()) {
  1.2381 +    clock_time = mPlayDuration + mStartTime;
  1.2382 +  } else if (stream) {
  1.2383 +    clock_time = GetCurrentTimeViaMediaStreamSync();
  1.2384 +  } else {
  1.2385 +    int64_t audio_time = GetAudioClock();
  1.2386 +    if (HasAudio() && !mAudioCompleted && audio_time != -1) {
  1.2387 +      clock_time = audio_time;
  1.2388 +      // Resync against the audio clock, while we're trusting the
  1.2389 +      // audio clock. This ensures no "drift", particularly on Linux.
  1.2390 +      mPlayDuration = clock_time - mStartTime;
  1.2391 +      mPlayStartTime = TimeStamp::Now();
  1.2392 +    } else {
   1.2393 +      // No usable audio clock; sync to the system clock instead.
  1.2394 +      clock_time = GetVideoStreamPosition();
  1.2395 +      // Ensure the clock can never go backwards.
  1.2396 +      NS_ASSERTION(mCurrentFrameTime <= clock_time || mPlaybackRate <= 0,
  1.2397 +          "Clock should go forwards if the playback rate is > 0.");
  1.2398 +    }
  1.2399 +  }
  1.2400 +  return clock_time;
  1.2401 +}
  1.2402 +
  1.2403 +void MediaDecoderStateMachine::AdvanceFrame()
  1.2404 +{
  1.2405 +  NS_ASSERTION(OnStateMachineThread(), "Should be on state machine thread.");
  1.2406 +  AssertCurrentThreadInMonitor();
  1.2407 +  NS_ASSERTION(!HasAudio() || mAudioStartTime != -1,
  1.2408 +               "Should know audio start time if we have audio.");
  1.2409 +
  1.2410 +  if (mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING) {
  1.2411 +    return;
  1.2412 +  }
  1.2413 +
   1.2414 +  // If playbackRate is 0.0, playback should not progress, but per spec we
   1.2415 +  // should not be in the paused state either.
  1.2416 +  if (mPlaybackRate == 0.0) {
  1.2417 +    return;
  1.2418 +  }
  1.2419 +
  1.2420 +  int64_t clock_time = GetClock();
  1.2421 +  // Skip frames up to the frame at the playback position, and figure out
  1.2422 +  // the time remaining until it's time to display the next frame.
  1.2423 +  int64_t remainingTime = AUDIO_DURATION_USECS;
  1.2424 +  NS_ASSERTION(clock_time >= mStartTime, "Should have positive clock time.");
  1.2425 +  nsAutoPtr<VideoData> currentFrame;
  1.2426 +#ifdef PR_LOGGING
  1.2427 +  int32_t droppedFrames = 0;
  1.2428 +#endif
  1.2429 +  if (VideoQueue().GetSize() > 0) {
  1.2430 +    VideoData* frame = VideoQueue().PeekFront();
  1.2431 +    while (mRealTime || clock_time >= frame->mTime) {
  1.2432 +      mVideoFrameEndTime = frame->GetEndTime();
  1.2433 +      currentFrame = frame;
  1.2434 +#ifdef PR_LOGGING
  1.2435 +      VERBOSE_LOG("discarding video frame %lld", frame->mTime);
  1.2436 +      if (droppedFrames++) {
  1.2437 +        VERBOSE_LOG("discarding video frame %lld (%d so far)", frame->mTime, droppedFrames-1);
  1.2438 +      }
  1.2439 +#endif
  1.2440 +      VideoQueue().PopFront();
  1.2441 +      // Notify the decode thread that the video queue's buffers may have
  1.2442 +      // free'd up space for more frames.
  1.2443 +      mDecoder->GetReentrantMonitor().NotifyAll();
  1.2444 +      mDecoder->UpdatePlaybackOffset(frame->mOffset);
  1.2445 +      if (VideoQueue().GetSize() == 0)
  1.2446 +        break;
  1.2447 +      frame = VideoQueue().PeekFront();
  1.2448 +    }
  1.2449 +    // Current frame has already been presented, wait until it's time to
  1.2450 +    // present the next frame.
  1.2451 +    if (frame && !currentFrame) {
  1.2452 +      int64_t now = IsPlaying() ? clock_time : mPlayDuration;
  1.2453 +
  1.2454 +      remainingTime = frame->mTime - now;
  1.2455 +    }
  1.2456 +  }
  1.2457 +
  1.2458 +  // Check to see if we don't have enough data to play up to the next frame.
  1.2459 +  // If we don't, switch to buffering mode.
  1.2460 +  MediaResource* resource = mDecoder->GetResource();
  1.2461 +  if (mState == DECODER_STATE_DECODING &&
  1.2462 +      mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
  1.2463 +      HasLowDecodedData(remainingTime + EXHAUSTED_DATA_MARGIN_USECS) &&
  1.2464 +      !mDecoder->IsDataCachedToEndOfResource() &&
  1.2465 +      !resource->IsSuspended()) {
  1.2466 +    if (JustExitedQuickBuffering() || HasLowUndecodedData()) {
  1.2467 +      if (currentFrame) {
  1.2468 +        VideoQueue().PushFront(currentFrame.forget());
  1.2469 +      }
  1.2470 +      StartBuffering();
  1.2471 +      // Don't go straight back to the state machine loop since that might
  1.2472 +      // cause us to start decoding again and we could flip-flop between
  1.2473 +      // decoding and quick-buffering.
  1.2474 +      ScheduleStateMachine(USECS_PER_S);
  1.2475 +      return;
  1.2476 +    }
  1.2477 +  }
  1.2478 +
  1.2479 +  // We've got enough data to keep playing until at least the next frame.
  1.2480 +  // Start playing now if need be.
  1.2481 +  if (!IsPlaying() && ((mFragmentEndTime >= 0 && clock_time < mFragmentEndTime) || mFragmentEndTime < 0)) {
  1.2482 +    StartPlayback();
  1.2483 +  }
  1.2484 +
  1.2485 +  if (currentFrame) {
  1.2486 +    // Decode one frame and display it.
  1.2487 +    TimeStamp presTime = mPlayStartTime - UsecsToDuration(mPlayDuration) +
  1.2488 +                          UsecsToDuration(currentFrame->mTime - mStartTime);
  1.2489 +    NS_ASSERTION(currentFrame->mTime >= mStartTime, "Should have positive frame time");
  1.2490 +    {
  1.2491 +      ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.2492 +      // If we have video, we want to increment the clock in steps of the frame
  1.2493 +      // duration.
  1.2494 +      RenderVideoFrame(currentFrame, presTime);
  1.2495 +    }
  1.2496 +    // If we're no longer playing after dropping and reacquiring the lock,
  1.2497 +    // playback must've been stopped on the decode thread (by a seek, for
  1.2498 +    // example).  In that case, the current frame is probably out of date.
  1.2499 +    if (!IsPlaying()) {
  1.2500 +      ScheduleStateMachine();
  1.2501 +      return;
  1.2502 +    }
  1.2503 +    MediaDecoder::FrameStatistics& frameStats = mDecoder->GetFrameStatistics();
  1.2504 +    frameStats.NotifyPresentedFrame();
  1.2505 +    remainingTime = currentFrame->GetEndTime() - clock_time;
  1.2506 +    currentFrame = nullptr;
  1.2507 +  }
  1.2508 +
  1.2509 +  // Cap the current time to the larger of the audio and video end time.
  1.2510 +  // This ensures that if we're running off the system clock, we don't
  1.2511 +  // advance the clock to after the media end time.
  1.2512 +  if (mVideoFrameEndTime != -1 || mAudioEndTime != -1) {
  1.2513 +    // These will not be -1 if we've displayed a video frame or played an audio frame.
  1.2514 +    clock_time = std::min(clock_time, std::max(mVideoFrameEndTime, mAudioEndTime));
  1.2515 +    if (clock_time > GetMediaTime()) {
  1.2516 +      // Only update the playback position if the clock time is greater
  1.2517 +      // than the previous playback position. The audio clock can sometimes
  1.2518 +      // report a time earlier than it has previously reported, and we need
  1.2519 +      // to handle that gracefully.
  1.2520 +      UpdatePlaybackPosition(clock_time);
  1.2521 +    }
  1.2522 +  }
  1.2523 +
  1.2524 +  // If the number of audio/video frames queued has changed, either by
  1.2525 +  // this function popping and playing a video frame, or by the audio
  1.2526 +  // thread popping and playing an audio frame, we may need to update our
  1.2527 +  // ready state. Post an update to do so.
  1.2528 +  UpdateReadyState();
  1.2529 +
  1.2530 +  ScheduleStateMachine(remainingTime);
  1.2531 +}
  1.2532 +
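         +// Blocks the calling (audio) thread for approximately aUsecs microseconds,
         +// waking early if the decoder is shutting down, a seek starts, the audio
         +// thread is being stopped, or playback stops.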
  1.2533 +void MediaDecoderStateMachine::Wait(int64_t aUsecs) {
  1.2534 +  NS_ASSERTION(OnAudioThread(), "Only call on the audio thread");
  1.2535 +  AssertCurrentThreadInMonitor();
  1.2536 +  TimeStamp end = TimeStamp::Now() + UsecsToDuration(std::max<int64_t>(USECS_PER_MS, aUsecs));
  1.2537 +  TimeStamp now;
  1.2538 +  while ((now = TimeStamp::Now()) < end &&
  1.2539 +         mState != DECODER_STATE_SHUTDOWN &&
  1.2540 +         mState != DECODER_STATE_SEEKING &&
  1.2541 +         !mStopAudioThread &&
  1.2542 +         IsPlaying())
  1.2543 +  {
  1.2544 +    int64_t ms = static_cast<int64_t>(NS_round((end - now).ToSeconds() * 1000));
  1.2545 +    if (ms == 0 || ms > UINT32_MAX) {
  1.2546 +      break;
  1.2547 +    }
  1.2548 +    mDecoder->GetReentrantMonitor().Wait(PR_MillisecondsToInterval(static_cast<uint32_t>(ms)));
  1.2549 +  }
  1.2550 +}
  1.2551 +
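         +// Queries the reader for the presentation start time and the first video
         +// frame. Updates mStartTime, adjusts mEndTime when the duration came from
         +// metadata, and initializes mAudioStartTime to the media start time.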
  1.2552 +VideoData* MediaDecoderStateMachine::FindStartTime()
  1.2553 +{
  1.2554 +  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  1.2555 +  AssertCurrentThreadInMonitor();
  1.2556 +  int64_t startTime = 0;
  1.2557 +  mStartTime = 0;
  1.2558 +  VideoData* v = nullptr;
  1.2559 +  {
  1.2560 +    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
  1.2561 +    v = mReader->FindStartTime(startTime);
  1.2562 +  }
  1.2563 +  if (startTime != 0) {
  1.2564 +    mStartTime = startTime;
  1.2565 +    if (mGotDurationFromMetaData) {
  1.2566 +      NS_ASSERTION(mEndTime != -1,
  1.2567 +                   "We should have mEndTime as supplied duration here");
  1.2568 +      // We were given a duration via a Content-Duration HTTP header.
  1.2569 +      // Adjust mEndTime so that mEndTime - mStartTime matches the specified
  1.2570 +      // duration.
  1.2571 +      mEndTime = mStartTime + mEndTime;
  1.2572 +    }
  1.2573 +  }
  1.2574 +  // Set the audio start time to be start of media. If this lies before the
  1.2575 +  // first actual audio frame we have, we'll inject silence during playback
  1.2576 +  // to ensure the audio starts at the correct time.
  1.2577 +  mAudioStartTime = mStartTime;
  1.2578 +  DECODER_LOG(PR_LOG_DEBUG, "Media start time is %lld", mStartTime);
  1.2579 +  return v;
  1.2580 +}
  1.2581 +
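         +// Posts a runnable to the main thread to re-evaluate the element's ready
         +// state, but only when GetNextFrameStatus() has changed since the last call.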
  1.2582 +void MediaDecoderStateMachine::UpdateReadyState() {
  1.2583 +  AssertCurrentThreadInMonitor();
  1.2584 +
  1.2585 +  MediaDecoderOwner::NextFrameStatus nextFrameStatus = GetNextFrameStatus();
  1.2586 +  if (nextFrameStatus == mLastFrameStatus) {
  1.2587 +    return;
  1.2588 +  }
  1.2589 +  mLastFrameStatus = nextFrameStatus;
  1.2590 +
  1.2591 +  /* This is a bit tricky. MediaDecoder::UpdateReadyStateForData will run on
  1.2592 +   * the main thread and re-evaluate GetNextFrameStatus there, passing it to
  1.2593 +   * HTMLMediaElement::UpdateReadyStateForData. It doesn't use the value of
  1.2594 +   * GetNextFrameStatus we computed here, because what we're computing here
  1.2595 +   * could be stale by the time MediaDecoder::UpdateReadyStateForData runs.
  1.2596 +   * We only compute GetNextFrameStatus here to avoid posting runnables to
  1.2597 +   * the main thread unnecessarily.
  1.2598 +   */
  1.2599 +  nsCOMPtr<nsIRunnable> event;
  1.2600 +  event = NS_NewRunnableMethod(mDecoder, &MediaDecoder::UpdateReadyStateForData);
  1.2601 +  NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
  1.2602 +}
  1.2603 +
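         +// Returns true if we entered quick (low-threshold) buffering and resumed
         +// decoding less than QUICK_BUFFER_THRESHOLD_USECS ago. Used to avoid
         +// oscillating between playing and buffering.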
  1.2604 +bool MediaDecoderStateMachine::JustExitedQuickBuffering()
  1.2605 +{
  1.2606 +  return !mDecodeStartTime.IsNull() &&
  1.2607 +    mQuickBuffering &&
  1.2608 +    (TimeStamp::Now() - mDecodeStartTime) < TimeDuration::FromMicroseconds(QUICK_BUFFER_THRESHOLD_USECS);
  1.2609 +}
  1.2610 +
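         +// Transitions from DECODING to BUFFERING: stops playback, decides whether
         +// this is a "quick" buffering pass, and notifies the element via
         +// UpdateReadyState().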
  1.2611 +void MediaDecoderStateMachine::StartBuffering()
  1.2612 +{
  1.2613 +  AssertCurrentThreadInMonitor();
  1.2614 +
  1.2615 +  if (mState != DECODER_STATE_DECODING) {
  1.2616 +    // We only move into BUFFERING state if we're actually decoding.
  1.2617 +    // If we're currently doing something else, we don't need to buffer,
  1.2618 +    // and more importantly, we shouldn't overwrite mState to interrupt
  1.2619 +    // the current operation, as that could leave us in an inconsistent
  1.2620 +    // state!
  1.2621 +    return;
  1.2622 +  }
  1.2623 +
  1.2624 +  if (IsPlaying()) {
  1.2625 +    StopPlayback();
  1.2626 +  }
  1.2627 +
  1.2628 +  TimeDuration decodeDuration = TimeStamp::Now() - mDecodeStartTime;
  1.2629 +  // Go into quick buffering mode provided we've not just left buffering using
  1.2630 +  // a "quick exit". This stops us flip-flopping between playing and buffering
  1.2631 +  // when the download speed is similar to the decode speed.
  1.2632 +  mQuickBuffering =
  1.2633 +    !JustExitedQuickBuffering() &&
  1.2634 +    decodeDuration < UsecsToDuration(QUICK_BUFFER_THRESHOLD_USECS);
  1.2635 +  mBufferingStart = TimeStamp::Now();
  1.2636 +
  1.2637 +  // We need to tell the element that buffering has started.
  1.2638 +  // We can't just directly send an asynchronous runnable that
  1.2639 +  // eventually fires the "waiting" event. The problem is that
  1.2640 +  // there might be pending main-thread events, such as "data
  1.2641 +  // received" notifications, that mean we're not actually still
  1.2642 +  // buffering by the time this runnable executes. So instead
  1.2643 +  // we just trigger UpdateReadyStateForData; when it runs, it
  1.2644 +  // will check the current state and decide whether to tell
  1.2645 +  // the element we're buffering or not.
  1.2646 +  UpdateReadyState();
  1.2647 +  mState = DECODER_STATE_BUFFERING;
  1.2648 +  DECODER_LOG(PR_LOG_DEBUG, "Changed state from DECODING to BUFFERING, decoded for %.3lfs",
  1.2649 +              decodeDuration.ToSeconds());
  1.2650 +#ifdef PR_LOGGING
  1.2651 +  MediaDecoder::Statistics stats = mDecoder->GetStatistics();
  1.2652 +  DECODER_LOG(PR_LOG_DEBUG, "Playback rate: %.1lfKB/s%s download rate: %.1lfKB/s%s",
  1.2653 +              stats.mPlaybackRate/1024, stats.mPlaybackRateReliable ? "" : " (unreliable)",
  1.2654 +              stats.mDownloadRate/1024, stats.mDownloadRateReliable ? "" : " (unreliable)");
  1.2655 +#endif
  1.2656 +}
  1.2657 +
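         +// Reports the buffered time ranges. The resource is pinned around the
         +// reader call so the underlying cached data stays available while the
         +// ranges are computed.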
  1.2658 +nsresult MediaDecoderStateMachine::GetBuffered(dom::TimeRanges* aBuffered) {
  1.2659 +  MediaResource* resource = mDecoder->GetResource();
  1.2660 +  NS_ENSURE_TRUE(resource, NS_ERROR_FAILURE);
  1.2661 +  resource->Pin();
  1.2662 +  nsresult res = mReader->GetBuffered(aBuffered, mStartTime);
  1.2663 +  resource->Unpin();
  1.2664 +  return res;
  1.2665 +}
  1.2666 +
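         +// Runs one cycle of the state machine, guarding against re-entrancy and
         +// clearing the pending timeout. Also stops the audio thread if audio is
         +// being captured.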
  1.2667 +nsresult MediaDecoderStateMachine::CallRunStateMachine()
  1.2668 +{
  1.2669 +  AssertCurrentThreadInMonitor();
  1.2670 +  NS_ASSERTION(OnStateMachineThread(), "Should be on state machine thread.");
  1.2671 +
  1.2672 +  // If audio is being captured, stop the audio thread if it's running
  1.2673 +  if (mAudioCaptured) {
  1.2674 +    StopAudioThread();
  1.2675 +  }
  1.2676 +
  1.2677 +  MOZ_ASSERT(!mInRunningStateMachine, "State machine cycles must run in sequence!");
  1.2678 +  mTimeout = TimeStamp();
  1.2679 +  mInRunningStateMachine = true;
  1.2680 +  nsresult res = RunStateMachine();
  1.2681 +  mInRunningStateMachine = false;
  1.2682 +  return res;
  1.2683 +}
  1.2684 +
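         +// Timer/runnable callback. Only runs the state machine if aTimerId matches
         +// the most recently scheduled id, so stale timers are ignored.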
  1.2685 +nsresult MediaDecoderStateMachine::TimeoutExpired(int aTimerId)
  1.2686 +{
  1.2687 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.2688 +  NS_ASSERTION(OnStateMachineThread(), "Must be on state machine thread");
  1.2689 +  mTimer->Cancel();
  1.2690 +  if (mTimerId == aTimerId) {
  1.2691 +    return CallRunStateMachine();
  1.2692 +  } else {
  1.2693 +    return NS_OK;
  1.2694 +  }
  1.2695 +}
  1.2696 +
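         +// Takes the decoder monitor and dispatches any audio/video decode tasks
         +// that are needed to wake the decoder.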
  1.2697 +void MediaDecoderStateMachine::ScheduleStateMachineWithLockAndWakeDecoder() {
  1.2698 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.2699 +  DispatchAudioDecodeTaskIfNeeded();
  1.2700 +  DispatchVideoDecodeTaskIfNeeded();
  1.2701 +}
  1.2702 +
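         +// Helper that forwards both timer firings (nsITimerCallback) and direct
         +// dispatches (nsIRunnable) to TimeoutExpired() with the timer id it was
         +// created with.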
  1.2703 +class TimerEvent : public nsITimerCallback, public nsRunnable {
  1.2704 +  NS_DECL_THREADSAFE_ISUPPORTS
  1.2705 +public:
  1.2706 +  TimerEvent(MediaDecoderStateMachine* aStateMachine, int aTimerId)
  1.2707 +    : mStateMachine(aStateMachine), mTimerId(aTimerId) {}
  1.2708 +
  1.2709 +  NS_IMETHOD Run() MOZ_OVERRIDE {
  1.2710 +    return mStateMachine->TimeoutExpired(mTimerId);
  1.2711 +  }
  1.2712 +
  1.2713 +  NS_IMETHOD Notify(nsITimer* aTimer) MOZ_OVERRIDE {
  1.2714 +    return mStateMachine->TimeoutExpired(mTimerId);
  1.2715 +  }
  1.2716 +private:
  1.2717 +  const nsRefPtr<MediaDecoderStateMachine> mStateMachine;
  1.2718 +  int mTimerId;
  1.2719 +};
  1.2720 +
  1.2721 +NS_IMPL_ISUPPORTS(TimerEvent, nsITimerCallback, nsIRunnable);
  1.2722 +
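         +// Schedules the next state machine cycle to run in aUsecs microseconds,
         +// unless an earlier run is already pending. A zero delay dispatches a
         +// runnable directly; non-zero delays use a one-shot timer. In real-time
         +// mode the delay is capped at 40ms.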
  1.2723 +nsresult MediaDecoderStateMachine::ScheduleStateMachine(int64_t aUsecs) {
  1.2724 +  AssertCurrentThreadInMonitor();
  1.2725 +  NS_ABORT_IF_FALSE(GetStateMachineThread(),
  1.2726 +    "Must have a state machine thread to schedule");
  1.2727 +
  1.2728 +  if (mState == DECODER_STATE_SHUTDOWN) {
  1.2729 +    return NS_ERROR_FAILURE;
  1.2730 +  }
  1.2731 +  aUsecs = std::max<int64_t>(aUsecs, 0);
  1.2732 +
  1.2733 +  TimeStamp timeout = TimeStamp::Now() + UsecsToDuration(aUsecs);
  1.2734 +  if (!mTimeout.IsNull() && timeout >= mTimeout) {
  1.2735 +    // We've already scheduled a timer set to expire at or before this time,
  1.2736 +    // or have an event dispatched to run the state machine.
  1.2737 +    return NS_OK;
  1.2738 +  }
  1.2739 +
  1.2740 +  uint32_t ms = static_cast<uint32_t>((aUsecs / USECS_PER_MS) & 0xFFFFFFFF);
  1.2741 +  if (mRealTime && ms > 40) {
  1.2742 +    ms = 40;
  1.2743 +  }
  1.2744 +
  1.2745 +  // Don't cancel the timer here, because this function may be called from
  1.2746 +  // different threads.
  1.2747 +
  1.2748 +  nsresult rv = NS_ERROR_FAILURE;
  1.2749 +  nsRefPtr<TimerEvent> event = new TimerEvent(this, mTimerId+1);
  1.2750 +
  1.2751 +  if (ms == 0) {
  1.2752 +    // When the delay is 0, dispatch a runnable directly to the state machine
  1.2753 +    // thread. This has lower latency than dispatching a runnable that then
  1.2754 +    // schedules a zero-delay timer.
  1.2755 +    rv = GetStateMachineThread()->Dispatch(event, NS_DISPATCH_NORMAL);
  1.2756 +  } else if (OnStateMachineThread()) {
  1.2757 +    rv = mTimer->InitWithCallback(event, ms, nsITimer::TYPE_ONE_SHOT);
  1.2758 +  } else {
  1.2759 +    MOZ_ASSERT(false, "A non-zero-delay timer should only be scheduled on the state machine thread");
  1.2760 +  }
  1.2761 +
  1.2762 +  if (NS_SUCCEEDED(rv)) {
  1.2763 +    mTimeout = timeout;
  1.2764 +    ++mTimerId;
  1.2765 +  } else {
  1.2766 +    NS_WARNING("Failed to schedule state machine");
  1.2767 +  }
  1.2768 +
  1.2769 +  return rv;
  1.2770 +}
  1.2771 +
  1.2772 +bool MediaDecoderStateMachine::OnDecodeThread() const
  1.2773 +{
  1.2774 +  return mDecodeTaskQueue->IsCurrentThreadIn();
  1.2775 +}
  1.2776 +
  1.2777 +bool MediaDecoderStateMachine::OnStateMachineThread() const
  1.2778 +{
  1.2779 +  bool rv = false;
  1.2780 +  mStateMachineThreadPool->IsOnCurrentThread(&rv);
  1.2781 +  return rv;
  1.2782 +}
  1.2783 +
  1.2784 +nsIEventTarget* MediaDecoderStateMachine::GetStateMachineThread()
  1.2785 +{
  1.2786 +  return mStateMachineThreadPool->GetEventTarget();
  1.2787 +}
  1.2788 +
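         +// Updates the playback rate. When there is no audio stream, the current
         +// position is captured into mBasePosition and the play start time is
         +// reset, so the system-clock-derived video position stays consistent
         +// across the rate change.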
  1.2789 +void MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate)
  1.2790 +{
  1.2791 +  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1.2792 +  NS_ASSERTION(aPlaybackRate != 0,
  1.2793 +      "PlaybackRate == 0 should be handled before this function.");
  1.2794 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.2795 +
  1.2796 +  if (mPlaybackRate == aPlaybackRate) {
  1.2797 +    return;
  1.2798 +  }
  1.2799 +
  1.2800 +  // Get position of the last time we changed the rate.
  1.2801 +  if (!HasAudio()) {
  1.2802 +    // mBasePosition is a position in the video stream, not an absolute time.
  1.2803 +    if (mState == DECODER_STATE_SEEKING) {
  1.2804 +      mBasePosition = mSeekTarget.mTime - mStartTime;
  1.2805 +    } else {
  1.2806 +      mBasePosition = GetVideoStreamPosition();
  1.2807 +    }
  1.2808 +    mPlayDuration = mBasePosition;
  1.2809 +    mResetPlayStartTime = true;
  1.2810 +    mPlayStartTime = TimeStamp::Now();
  1.2811 +  }
  1.2812 +
  1.2813 +  mPlaybackRate = aPlaybackRate;
  1.2814 +}
  1.2815 +
  1.2816 +void MediaDecoderStateMachine::SetPreservesPitch(bool aPreservesPitch)
  1.2817 +{
  1.2818 +  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  1.2819 +  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  1.2820 +
  1.2821 +  mPreservesPitch = aPreservesPitch;
  1.2822 +}
  1.2823 +
  1.2824 +void
  1.2825 +MediaDecoderStateMachine::SetMinimizePrerollUntilPlaybackStarts()
  1.2826 +{
  1.2827 +  AssertCurrentThreadInMonitor();
  1.2828 +  mMinimizePreroll = true;
  1.2829 +}
  1.2830 +
  1.2831 +bool MediaDecoderStateMachine::IsShutdown()
  1.2832 +{
  1.2833 +  AssertCurrentThreadInMonitor();
  1.2834 +  return GetState() == DECODER_STATE_SHUTDOWN;
  1.2835 +}
  1.2836 +
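         +// Packages the stream parameters into a TimedMetadata record tagged with
         +// aPublishTime and hands it to mMetadataManager.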
  1.2837 +void MediaDecoderStateMachine::QueueMetadata(int64_t aPublishTime,
  1.2838 +                                             int aChannels,
  1.2839 +                                             int aRate,
  1.2840 +                                             bool aHasAudio,
  1.2841 +                                             bool aHasVideo,
  1.2842 +                                             MetadataTags* aTags)
  1.2843 +{
  1.2844 +  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  1.2845 +  AssertCurrentThreadInMonitor();
  1.2846 +  TimedMetadata* metadata = new TimedMetadata;
  1.2847 +  metadata->mPublishTime = aPublishTime;
  1.2848 +  metadata->mChannels = aChannels;
  1.2849 +  metadata->mRate = aRate;
  1.2850 +  metadata->mHasAudio = aHasAudio;
  1.2851 +  metadata->mHasVideo = aHasVideo;
  1.2852 +  metadata->mTags = aTags;
  1.2853 +  mMetadataManager.QueueMetadata(metadata);
  1.2854 +}
  1.2855 +
  1.2856 +} // namespace mozilla
  1.2857 +
  1.2858 +// avoid redefined macro in unified build
  1.2859 +#undef DECODER_LOG
  1.2860 +#undef VERBOSE_LOG
