Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory-only storage according to the privacy.thirdparty.isolate preference;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
michael@0 | 1 | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
michael@0 | 2 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 3 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 4 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 5 | |
michael@0 | 6 | #ifdef XP_WIN |
michael@0 | 7 | // Include Windows headers required for enabling high precision timers. |
michael@0 | 8 | #include "windows.h" |
michael@0 | 9 | #include "mmsystem.h" |
michael@0 | 10 | #endif |
michael@0 | 11 | |
michael@0 | 12 | #include "mozilla/DebugOnly.h" |
michael@0 | 13 | #include <stdint.h> |
michael@0 | 14 | |
michael@0 | 15 | #include "MediaDecoderStateMachine.h" |
michael@0 | 16 | #include "AudioStream.h" |
michael@0 | 17 | #include "nsTArray.h" |
michael@0 | 18 | #include "MediaDecoder.h" |
michael@0 | 19 | #include "MediaDecoderReader.h" |
michael@0 | 20 | #include "mozilla/mozalloc.h" |
michael@0 | 21 | #include "VideoUtils.h" |
michael@0 | 22 | #include "mozilla/dom/TimeRanges.h" |
michael@0 | 23 | #include "nsDeque.h" |
michael@0 | 24 | #include "AudioSegment.h" |
michael@0 | 25 | #include "VideoSegment.h" |
michael@0 | 26 | #include "ImageContainer.h" |
michael@0 | 27 | #include "nsComponentManagerUtils.h" |
michael@0 | 28 | #include "nsITimer.h" |
michael@0 | 29 | #include "nsContentUtils.h" |
michael@0 | 30 | #include "MediaShutdownManager.h" |
michael@0 | 31 | #include "SharedThreadPool.h" |
michael@0 | 32 | #include "MediaTaskQueue.h" |
michael@0 | 33 | #include "nsIEventTarget.h" |
michael@0 | 34 | #include "prenv.h" |
michael@0 | 35 | #include "mozilla/Preferences.h" |
michael@0 | 36 | #include "gfx2DGlue.h" |
michael@0 | 37 | |
michael@0 | 38 | #include <algorithm> |
michael@0 | 39 | |
michael@0 | 40 | namespace mozilla { |
michael@0 | 41 | |
michael@0 | 42 | using namespace mozilla::layers; |
michael@0 | 43 | using namespace mozilla::dom; |
michael@0 | 44 | using namespace mozilla::gfx; |
michael@0 | 45 | |
michael@0 | 46 | // avoid redefined macro in unified build |
michael@0 | 47 | #undef DECODER_LOG |
michael@0 | 48 | #undef VERBOSE_LOG |
michael@0 | 49 | |
michael@0 | 50 | #ifdef PR_LOGGING |
michael@0 | 51 | extern PRLogModuleInfo* gMediaDecoderLog; |
michael@0 | 52 | #define DECODER_LOG(type, msg, ...) \ |
michael@0 | 53 | PR_LOG(gMediaDecoderLog, type, ("Decoder=%p " msg, mDecoder.get(), ##__VA_ARGS__)) |
michael@0 | 54 | #define VERBOSE_LOG(msg, ...) \ |
michael@0 | 55 | PR_BEGIN_MACRO \ |
michael@0 | 56 | if (!PR_GetEnv("MOZ_QUIET")) { \ |
michael@0 | 57 | DECODER_LOG(PR_LOG_DEBUG, msg, ##__VA_ARGS__); \ |
michael@0 | 58 | } \ |
michael@0 | 59 | PR_END_MACRO |
michael@0 | 60 | #else |
michael@0 | 61 | #define DECODER_LOG(type, msg, ...) |
michael@0 | 62 | #define VERBOSE_LOG(msg, ...) |
michael@0 | 63 | #endif |
michael@0 | 64 | |
michael@0 | 65 | // GetCurrentTime is defined in winbase.h as zero argument macro forwarding to |
michael@0 | 66 | // GetTickCount() and conflicts with MediaDecoderStateMachine::GetCurrentTime |
michael@0 | 67 | // implementation. With unified builds, putting this in headers is not enough. |
michael@0 | 68 | #ifdef GetCurrentTime |
michael@0 | 69 | #undef GetCurrentTime |
michael@0 | 70 | #endif |
michael@0 | 71 | |
michael@0 | 72 | // Wait this number of seconds when buffering, then leave and play |
michael@0 | 73 | // as best as we can if the required amount of data hasn't been |
michael@0 | 74 | // retrieved. |
michael@0 | 75 | static const uint32_t BUFFERING_WAIT_S = 30; |
michael@0 | 76 | |
michael@0 | 77 | // If audio queue has less than this many usecs of decoded audio, we won't risk |
michael@0 | 78 | // trying to decode the video, we'll skip decoding video up to the next |
michael@0 | 79 | // keyframe. We may increase this value for an individual decoder if we |
michael@0 | 80 | // encounter video frames which take a long time to decode. |
michael@0 | 81 | static const uint32_t LOW_AUDIO_USECS = 300000; |
michael@0 | 82 | |
michael@0 | 83 | // If more than this many usecs of decoded audio is queued, we'll hold off |
michael@0 | 84 | // decoding more audio. If we increase the low audio threshold (see |
michael@0 | 85 | // LOW_AUDIO_USECS above) we'll also increase this value to ensure it's not |
michael@0 | 86 | // less than the low audio threshold. |
michael@0 | 87 | const int64_t AMPLE_AUDIO_USECS = 1000000; |
michael@0 | 88 | |
michael@0 | 89 | // When we're only playing audio and we don't have a video stream, we divide |
michael@0 | 90 | // AMPLE_AUDIO_USECS and LOW_AUDIO_USECS by the following value. This reduces |
michael@0 | 91 | // the amount of decoded audio we buffer, reducing our memory usage. We only |
michael@0 | 92 | // need to decode far ahead when we're decoding video using software decoding, |
michael@0 | 93 | // as otherwise a long video decode could cause an audio underrun. |
michael@0 | 94 | const int64_t NO_VIDEO_AMPLE_AUDIO_DIVISOR = 8; |
michael@0 | 95 | |
michael@0 | 96 | // Maximum number of bytes we'll allocate and write at once to the audio |
michael@0 | 97 | // hardware when the audio stream contains missing frames and we're |
michael@0 | 98 | // writing silence in order to fill the gap. We limit our silence-writes |
michael@0 | 99 | // to 32KB in order to avoid allocating an impossibly large chunk of |
michael@0 | 100 | // memory if we encounter a large chunk of silence. |
michael@0 | 101 | const uint32_t SILENCE_BYTES_CHUNK = 32 * 1024; |
michael@0 | 102 | |
michael@0 | 103 | // If we have fewer than LOW_VIDEO_FRAMES decoded frames, and |
michael@0 | 104 | // we're not "prerolling video", we'll skip the video up to the next keyframe |
michael@0 | 105 | // which is at or after the current playback position. |
michael@0 | 106 | static const uint32_t LOW_VIDEO_FRAMES = 1; |
michael@0 | 107 | |
michael@0 | 108 | // Arbitrary "frame duration" when playing only audio. |
michael@0 | 109 | static const int AUDIO_DURATION_USECS = 40000; |
michael@0 | 110 | |
michael@0 | 111 | // If we increase our "low audio threshold" (see LOW_AUDIO_USECS above), we |
michael@0 | 112 | // use this as a factor in all our calculations. Increasing this will cause |
michael@0 | 113 | // us to be more likely to increase our low audio threshold, and to |
michael@0 | 114 | // increase it by more. |
michael@0 | 115 | static const int THRESHOLD_FACTOR = 2; |
michael@0 | 116 | |
michael@0 | 117 | // If we have less than this much undecoded data available, we'll consider |
michael@0 | 118 | // ourselves to be running low on undecoded data. We determine how much |
michael@0 | 119 | // undecoded data we have remaining using the reader's GetBuffered() |
michael@0 | 120 | // implementation. |
michael@0 | 121 | static const int64_t LOW_DATA_THRESHOLD_USECS = 5000000; |
michael@0 | 122 | |
michael@0 | 123 | // LOW_DATA_THRESHOLD_USECS needs to be greater than AMPLE_AUDIO_USECS, otherwise |
michael@0 | 124 | // the skip-to-keyframe logic can activate when we're running low on data. |
michael@0 | 125 | static_assert(LOW_DATA_THRESHOLD_USECS > AMPLE_AUDIO_USECS, |
michael@0 | 126 | "LOW_DATA_THRESHOLD_USECS is too small"); |
michael@0 | 127 | |
michael@0 | 128 | // Amount of excess usecs of data to add in to the "should we buffer" calculation. |
michael@0 | 129 | static const uint32_t EXHAUSTED_DATA_MARGIN_USECS = 60000; |
michael@0 | 130 | |
michael@0 | 131 | // If we enter buffering within QUICK_BUFFER_THRESHOLD_USECS seconds of starting |
michael@0 | 132 | // decoding, we'll enter "quick buffering" mode, which exits a lot sooner than |
michael@0 | 133 | // normal buffering mode. This exists so that if the decode-ahead exhausts the |
michael@0 | 134 | // downloaded data while decode/playback is just starting up (for example |
michael@0 | 135 | // after a seek while the media is still playing, or when playing a media |
michael@0 | 136 | // as soon as it's load started), we won't necessarily stop for 30s and wait |
michael@0 | 137 | // for buffering. We may actually be able to playback in this case, so exit |
michael@0 | 138 | // buffering early and try to play. If it turns out we can't play, we'll fall |
michael@0 | 139 | // back to buffering normally. |
michael@0 | 140 | static const uint32_t QUICK_BUFFER_THRESHOLD_USECS = 2000000; |
michael@0 | 141 | |
michael@0 | 142 | // If we're quick buffering, we'll remain in buffering mode while we have less than |
michael@0 | 143 | // QUICK_BUFFERING_LOW_DATA_USECS of decoded data available. |
michael@0 | 144 | static const uint32_t QUICK_BUFFERING_LOW_DATA_USECS = 1000000; |
michael@0 | 145 | |
michael@0 | 146 | // If QUICK_BUFFERING_LOW_DATA_USECS is > AMPLE_AUDIO_USECS, we won't exit |
michael@0 | 147 | // quick buffering in a timely fashion, as the decode pauses when it |
michael@0 | 148 | // reaches AMPLE_AUDIO_USECS decoded data, and thus we'll never reach |
michael@0 | 149 | // QUICK_BUFFERING_LOW_DATA_USECS. |
michael@0 | 150 | static_assert(QUICK_BUFFERING_LOW_DATA_USECS <= AMPLE_AUDIO_USECS, |
michael@0 | 151 | "QUICK_BUFFERING_LOW_DATA_USECS is too large"); |
michael@0 | 152 | |
michael@0 | 153 | // This value has been chosen empirically. |
michael@0 | 154 | static const uint32_t AUDIOSTREAM_MIN_WRITE_BEFORE_START_USECS = 200000; |
michael@0 | 155 | |
michael@0 | 156 | // The amount of instability we tollerate in calls to |
michael@0 | 157 | // MediaDecoderStateMachine::UpdateEstimatedDuration(); changes of duration |
michael@0 | 158 | // less than this are ignored, as they're assumed to be the result of |
michael@0 | 159 | // instability in the duration estimation. |
michael@0 | 160 | static const int64_t ESTIMATED_DURATION_FUZZ_FACTOR_USECS = USECS_PER_S / 2; |
michael@0 | 161 | |
michael@0 | 162 | static TimeDuration UsecsToDuration(int64_t aUsecs) { |
michael@0 | 163 | return TimeDuration::FromMilliseconds(static_cast<double>(aUsecs) / USECS_PER_MS); |
michael@0 | 164 | } |
michael@0 | 165 | |
michael@0 | 166 | static int64_t DurationToUsecs(TimeDuration aDuration) { |
michael@0 | 167 | return static_cast<int64_t>(aDuration.ToSeconds() * USECS_PER_S); |
michael@0 | 168 | } |
michael@0 | 169 | |
michael@0 | 170 | MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder, |
michael@0 | 171 | MediaDecoderReader* aReader, |
michael@0 | 172 | bool aRealTime) : |
michael@0 | 173 | mDecoder(aDecoder), |
michael@0 | 174 | mState(DECODER_STATE_DECODING_METADATA), |
michael@0 | 175 | mInRunningStateMachine(false), |
michael@0 | 176 | mSyncPointInMediaStream(-1), |
michael@0 | 177 | mSyncPointInDecodedStream(-1), |
michael@0 | 178 | mResetPlayStartTime(false), |
michael@0 | 179 | mPlayDuration(0), |
michael@0 | 180 | mStartTime(-1), |
michael@0 | 181 | mEndTime(-1), |
michael@0 | 182 | mFragmentEndTime(-1), |
michael@0 | 183 | mReader(aReader), |
michael@0 | 184 | mCurrentFrameTime(0), |
michael@0 | 185 | mAudioStartTime(-1), |
michael@0 | 186 | mAudioEndTime(-1), |
michael@0 | 187 | mVideoFrameEndTime(-1), |
michael@0 | 188 | mVolume(1.0), |
michael@0 | 189 | mPlaybackRate(1.0), |
michael@0 | 190 | mPreservesPitch(true), |
michael@0 | 191 | mBasePosition(0), |
michael@0 | 192 | mAmpleVideoFrames(2), |
michael@0 | 193 | mLowAudioThresholdUsecs(LOW_AUDIO_USECS), |
michael@0 | 194 | mAmpleAudioThresholdUsecs(AMPLE_AUDIO_USECS), |
michael@0 | 195 | mDispatchedAudioDecodeTask(false), |
michael@0 | 196 | mDispatchedVideoDecodeTask(false), |
michael@0 | 197 | mIsReaderIdle(false), |
michael@0 | 198 | mAudioCaptured(false), |
michael@0 | 199 | mTransportSeekable(true), |
michael@0 | 200 | mMediaSeekable(true), |
michael@0 | 201 | mPositionChangeQueued(false), |
michael@0 | 202 | mAudioCompleted(false), |
michael@0 | 203 | mGotDurationFromMetaData(false), |
michael@0 | 204 | mDispatchedEventToDecode(false), |
michael@0 | 205 | mStopAudioThread(true), |
michael@0 | 206 | mQuickBuffering(false), |
michael@0 | 207 | mMinimizePreroll(false), |
michael@0 | 208 | mDecodeThreadWaiting(false), |
michael@0 | 209 | mRealTime(aRealTime), |
michael@0 | 210 | mLastFrameStatus(MediaDecoderOwner::NEXT_FRAME_UNINITIALIZED), |
michael@0 | 211 | mTimerId(0) |
michael@0 | 212 | { |
michael@0 | 213 | MOZ_COUNT_CTOR(MediaDecoderStateMachine); |
michael@0 | 214 | NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); |
michael@0 | 215 | |
michael@0 | 216 | // Only enable realtime mode when "media.realtime_decoder.enabled" is true. |
michael@0 | 217 | if (Preferences::GetBool("media.realtime_decoder.enabled", false) == false) |
michael@0 | 218 | mRealTime = false; |
michael@0 | 219 | |
michael@0 | 220 | mAmpleVideoFrames = |
michael@0 | 221 | std::max<uint32_t>(Preferences::GetUint("media.video-queue.default-size", 10), 3); |
michael@0 | 222 | |
michael@0 | 223 | mBufferingWait = mRealTime ? 0 : BUFFERING_WAIT_S; |
michael@0 | 224 | mLowDataThresholdUsecs = mRealTime ? 0 : LOW_DATA_THRESHOLD_USECS; |
michael@0 | 225 | |
michael@0 | 226 | mVideoPrerollFrames = mRealTime ? 0 : mAmpleVideoFrames / 2; |
michael@0 | 227 | mAudioPrerollUsecs = mRealTime ? 0 : LOW_AUDIO_USECS * 2; |
michael@0 | 228 | |
michael@0 | 229 | #ifdef XP_WIN |
michael@0 | 230 | // Ensure high precision timers are enabled on Windows, otherwise the state |
michael@0 | 231 | // machine thread isn't woken up at reliable intervals to set the next frame, |
michael@0 | 232 | // and we drop frames while painting. Note that multiple calls to this |
michael@0 | 233 | // function per-process is OK, provided each call is matched by a corresponding |
michael@0 | 234 | // timeEndPeriod() call. |
michael@0 | 235 | timeBeginPeriod(1); |
michael@0 | 236 | #endif |
michael@0 | 237 | } |
michael@0 | 238 | |
michael@0 | 239 | MediaDecoderStateMachine::~MediaDecoderStateMachine() |
michael@0 | 240 | { |
michael@0 | 241 | MOZ_ASSERT(NS_IsMainThread(), "Should be on main thread."); |
michael@0 | 242 | MOZ_COUNT_DTOR(MediaDecoderStateMachine); |
michael@0 | 243 | NS_ASSERTION(!mPendingWakeDecoder.get(), |
michael@0 | 244 | "WakeDecoder should have been revoked already"); |
michael@0 | 245 | |
michael@0 | 246 | MOZ_ASSERT(!mDecodeTaskQueue, "Should be released in SHUTDOWN"); |
michael@0 | 247 | // No need to cancel the timer here for we've done that in SHUTDOWN. |
michael@0 | 248 | MOZ_ASSERT(!mTimer, "Should be released in SHUTDOWN"); |
michael@0 | 249 | mReader = nullptr; |
michael@0 | 250 | |
michael@0 | 251 | #ifdef XP_WIN |
michael@0 | 252 | timeEndPeriod(1); |
michael@0 | 253 | #endif |
michael@0 | 254 | } |
michael@0 | 255 | |
michael@0 | 256 | bool MediaDecoderStateMachine::HasFutureAudio() { |
michael@0 | 257 | AssertCurrentThreadInMonitor(); |
michael@0 | 258 | NS_ASSERTION(HasAudio(), "Should only call HasFutureAudio() when we have audio"); |
michael@0 | 259 | // We've got audio ready to play if: |
michael@0 | 260 | // 1. We've not completed playback of audio, and |
michael@0 | 261 | // 2. we either have more than the threshold of decoded audio available, or |
michael@0 | 262 | // we've completely decoded all audio (but not finished playing it yet |
michael@0 | 263 | // as per 1). |
michael@0 | 264 | return !mAudioCompleted && |
michael@0 | 265 | (AudioDecodedUsecs() > LOW_AUDIO_USECS * mPlaybackRate || AudioQueue().IsFinished()); |
michael@0 | 266 | } |
michael@0 | 267 | |
michael@0 | 268 | bool MediaDecoderStateMachine::HaveNextFrameData() { |
michael@0 | 269 | AssertCurrentThreadInMonitor(); |
michael@0 | 270 | return (!HasAudio() || HasFutureAudio()) && |
michael@0 | 271 | (!HasVideo() || VideoQueue().GetSize() > 0); |
michael@0 | 272 | } |
michael@0 | 273 | |
michael@0 | 274 | int64_t MediaDecoderStateMachine::GetDecodedAudioDuration() { |
michael@0 | 275 | NS_ASSERTION(OnDecodeThread() || OnStateMachineThread(), |
michael@0 | 276 | "Should be on decode thread or state machine thread"); |
michael@0 | 277 | AssertCurrentThreadInMonitor(); |
michael@0 | 278 | int64_t audioDecoded = AudioQueue().Duration(); |
michael@0 | 279 | if (mAudioEndTime != -1) { |
michael@0 | 280 | audioDecoded += mAudioEndTime - GetMediaTime(); |
michael@0 | 281 | } |
michael@0 | 282 | return audioDecoded; |
michael@0 | 283 | } |
michael@0 | 284 | |
michael@0 | 285 | void MediaDecoderStateMachine::SendStreamAudio(AudioData* aAudio, |
michael@0 | 286 | DecodedStreamData* aStream, |
michael@0 | 287 | AudioSegment* aOutput) |
michael@0 | 288 | { |
michael@0 | 289 | NS_ASSERTION(OnDecodeThread() || |
michael@0 | 290 | OnStateMachineThread(), "Should be on decode thread or state machine thread"); |
michael@0 | 291 | AssertCurrentThreadInMonitor(); |
michael@0 | 292 | |
michael@0 | 293 | if (aAudio->mTime <= aStream->mLastAudioPacketTime) { |
michael@0 | 294 | // ignore packet that we've already processed |
michael@0 | 295 | return; |
michael@0 | 296 | } |
michael@0 | 297 | aStream->mLastAudioPacketTime = aAudio->mTime; |
michael@0 | 298 | aStream->mLastAudioPacketEndTime = aAudio->GetEndTime(); |
michael@0 | 299 | |
michael@0 | 300 | // This logic has to mimic AudioLoop closely to make sure we write |
michael@0 | 301 | // the exact same silences |
michael@0 | 302 | CheckedInt64 audioWrittenOffset = UsecsToFrames(mInfo.mAudio.mRate, |
michael@0 | 303 | aStream->mInitialTime + mStartTime) + aStream->mAudioFramesWritten; |
michael@0 | 304 | CheckedInt64 frameOffset = UsecsToFrames(mInfo.mAudio.mRate, aAudio->mTime); |
michael@0 | 305 | if (!audioWrittenOffset.isValid() || !frameOffset.isValid()) |
michael@0 | 306 | return; |
michael@0 | 307 | if (audioWrittenOffset.value() < frameOffset.value()) { |
michael@0 | 308 | // Write silence to catch up |
michael@0 | 309 | VERBOSE_LOG("writing %d frames of silence to MediaStream", |
michael@0 | 310 | int32_t(frameOffset.value() - audioWrittenOffset.value())); |
michael@0 | 311 | AudioSegment silence; |
michael@0 | 312 | silence.InsertNullDataAtStart(frameOffset.value() - audioWrittenOffset.value()); |
michael@0 | 313 | aStream->mAudioFramesWritten += silence.GetDuration(); |
michael@0 | 314 | aOutput->AppendFrom(&silence); |
michael@0 | 315 | } |
michael@0 | 316 | |
michael@0 | 317 | int64_t offset; |
michael@0 | 318 | if (aStream->mAudioFramesWritten == 0) { |
michael@0 | 319 | NS_ASSERTION(frameOffset.value() <= audioWrittenOffset.value(), |
michael@0 | 320 | "Otherwise we'd have taken the write-silence path"); |
michael@0 | 321 | // We're starting in the middle of a packet. Split the packet. |
michael@0 | 322 | offset = audioWrittenOffset.value() - frameOffset.value(); |
michael@0 | 323 | } else { |
michael@0 | 324 | // Write the entire packet. |
michael@0 | 325 | offset = 0; |
michael@0 | 326 | } |
michael@0 | 327 | |
michael@0 | 328 | if (offset >= aAudio->mFrames) |
michael@0 | 329 | return; |
michael@0 | 330 | |
michael@0 | 331 | aAudio->EnsureAudioBuffer(); |
michael@0 | 332 | nsRefPtr<SharedBuffer> buffer = aAudio->mAudioBuffer; |
michael@0 | 333 | AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data()); |
michael@0 | 334 | nsAutoTArray<const AudioDataValue*,2> channels; |
michael@0 | 335 | for (uint32_t i = 0; i < aAudio->mChannels; ++i) { |
michael@0 | 336 | channels.AppendElement(bufferData + i*aAudio->mFrames + offset); |
michael@0 | 337 | } |
michael@0 | 338 | aOutput->AppendFrames(buffer.forget(), channels, aAudio->mFrames); |
michael@0 | 339 | VERBOSE_LOG("writing %d frames of data to MediaStream for AudioData at %lld", |
michael@0 | 340 | aAudio->mFrames - int32_t(offset), aAudio->mTime); |
michael@0 | 341 | aStream->mAudioFramesWritten += aAudio->mFrames - int32_t(offset); |
michael@0 | 342 | } |
michael@0 | 343 | |
michael@0 | 344 | static void WriteVideoToMediaStream(layers::Image* aImage, |
michael@0 | 345 | int64_t aDuration, |
michael@0 | 346 | const IntSize& aIntrinsicSize, |
michael@0 | 347 | VideoSegment* aOutput) |
michael@0 | 348 | { |
michael@0 | 349 | nsRefPtr<layers::Image> image = aImage; |
michael@0 | 350 | aOutput->AppendFrame(image.forget(), aDuration, aIntrinsicSize); |
michael@0 | 351 | } |
michael@0 | 352 | |
michael@0 | 353 | static const TrackID TRACK_AUDIO = 1; |
michael@0 | 354 | static const TrackID TRACK_VIDEO = 2; |
michael@0 | 355 | static const TrackRate RATE_VIDEO = USECS_PER_S; |
michael@0 | 356 | |
michael@0 | 357 | void MediaDecoderStateMachine::SendStreamData() |
michael@0 | 358 | { |
michael@0 | 359 | NS_ASSERTION(OnDecodeThread() || |
michael@0 | 360 | OnStateMachineThread(), "Should be on decode thread or state machine thread"); |
michael@0 | 361 | AssertCurrentThreadInMonitor(); |
michael@0 | 362 | |
michael@0 | 363 | DecodedStreamData* stream = mDecoder->GetDecodedStream(); |
michael@0 | 364 | if (!stream) |
michael@0 | 365 | return; |
michael@0 | 366 | |
michael@0 | 367 | if (mState == DECODER_STATE_DECODING_METADATA) |
michael@0 | 368 | return; |
michael@0 | 369 | |
michael@0 | 370 | // If there's still an audio thread alive, then we can't send any stream |
michael@0 | 371 | // data yet since both SendStreamData and the audio thread want to be in |
michael@0 | 372 | // charge of popping the audio queue. We're waiting for the audio thread |
michael@0 | 373 | // to die before sending anything to our stream. |
michael@0 | 374 | if (mAudioThread) |
michael@0 | 375 | return; |
michael@0 | 376 | |
michael@0 | 377 | int64_t minLastAudioPacketTime = INT64_MAX; |
michael@0 | 378 | bool finished = |
michael@0 | 379 | (!mInfo.HasAudio() || AudioQueue().IsFinished()) && |
michael@0 | 380 | (!mInfo.HasVideo() || VideoQueue().IsFinished()); |
michael@0 | 381 | if (mDecoder->IsSameOriginMedia()) { |
michael@0 | 382 | SourceMediaStream* mediaStream = stream->mStream; |
michael@0 | 383 | StreamTime endPosition = 0; |
michael@0 | 384 | |
michael@0 | 385 | if (!stream->mStreamInitialized) { |
michael@0 | 386 | if (mInfo.HasAudio()) { |
michael@0 | 387 | AudioSegment* audio = new AudioSegment(); |
michael@0 | 388 | mediaStream->AddTrack(TRACK_AUDIO, mInfo.mAudio.mRate, 0, audio); |
michael@0 | 389 | stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_AUDIO, |
michael@0 | 390 | GetStateMachineThread(), GetWakeDecoderRunnable()); |
michael@0 | 391 | } |
michael@0 | 392 | if (mInfo.HasVideo()) { |
michael@0 | 393 | VideoSegment* video = new VideoSegment(); |
michael@0 | 394 | mediaStream->AddTrack(TRACK_VIDEO, RATE_VIDEO, 0, video); |
michael@0 | 395 | stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_VIDEO, |
michael@0 | 396 | GetStateMachineThread(), GetWakeDecoderRunnable()); |
michael@0 | 397 | } |
michael@0 | 398 | stream->mStreamInitialized = true; |
michael@0 | 399 | } |
michael@0 | 400 | |
michael@0 | 401 | if (mInfo.HasAudio()) { |
michael@0 | 402 | nsAutoTArray<AudioData*,10> audio; |
michael@0 | 403 | // It's OK to hold references to the AudioData because while audio |
michael@0 | 404 | // is captured, only the decoder thread pops from the queue (see below). |
michael@0 | 405 | AudioQueue().GetElementsAfter(stream->mLastAudioPacketTime, &audio); |
michael@0 | 406 | AudioSegment output; |
michael@0 | 407 | for (uint32_t i = 0; i < audio.Length(); ++i) { |
michael@0 | 408 | SendStreamAudio(audio[i], stream, &output); |
michael@0 | 409 | } |
michael@0 | 410 | if (output.GetDuration() > 0) { |
michael@0 | 411 | mediaStream->AppendToTrack(TRACK_AUDIO, &output); |
michael@0 | 412 | } |
michael@0 | 413 | if (AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) { |
michael@0 | 414 | mediaStream->EndTrack(TRACK_AUDIO); |
michael@0 | 415 | stream->mHaveSentFinishAudio = true; |
michael@0 | 416 | } |
michael@0 | 417 | minLastAudioPacketTime = std::min(minLastAudioPacketTime, stream->mLastAudioPacketTime); |
michael@0 | 418 | endPosition = std::max(endPosition, |
michael@0 | 419 | TicksToTimeRoundDown(mInfo.mAudio.mRate, stream->mAudioFramesWritten)); |
michael@0 | 420 | } |
michael@0 | 421 | |
michael@0 | 422 | if (mInfo.HasVideo()) { |
michael@0 | 423 | nsAutoTArray<VideoData*,10> video; |
michael@0 | 424 | // It's OK to hold references to the VideoData only the decoder thread |
michael@0 | 425 | // pops from the queue. |
michael@0 | 426 | VideoQueue().GetElementsAfter(stream->mNextVideoTime, &video); |
michael@0 | 427 | VideoSegment output; |
michael@0 | 428 | for (uint32_t i = 0; i < video.Length(); ++i) { |
michael@0 | 429 | VideoData* v = video[i]; |
michael@0 | 430 | if (stream->mNextVideoTime < v->mTime) { |
michael@0 | 431 | VERBOSE_LOG("writing last video to MediaStream %p for %lldus", |
michael@0 | 432 | mediaStream, v->mTime - stream->mNextVideoTime); |
michael@0 | 433 | // Write last video frame to catch up. mLastVideoImage can be null here |
michael@0 | 434 | // which is fine, it just means there's no video. |
michael@0 | 435 | WriteVideoToMediaStream(stream->mLastVideoImage, |
michael@0 | 436 | v->mTime - stream->mNextVideoTime, stream->mLastVideoImageDisplaySize, |
michael@0 | 437 | &output); |
michael@0 | 438 | stream->mNextVideoTime = v->mTime; |
michael@0 | 439 | } |
michael@0 | 440 | if (stream->mNextVideoTime < v->GetEndTime()) { |
michael@0 | 441 | VERBOSE_LOG("writing video frame %lldus to MediaStream %p for %lldus", |
michael@0 | 442 | v->mTime, mediaStream, v->GetEndTime() - stream->mNextVideoTime); |
michael@0 | 443 | WriteVideoToMediaStream(v->mImage, |
michael@0 | 444 | v->GetEndTime() - stream->mNextVideoTime, v->mDisplay, |
michael@0 | 445 | &output); |
michael@0 | 446 | stream->mNextVideoTime = v->GetEndTime(); |
michael@0 | 447 | stream->mLastVideoImage = v->mImage; |
michael@0 | 448 | stream->mLastVideoImageDisplaySize = v->mDisplay; |
michael@0 | 449 | } else { |
michael@0 | 450 | VERBOSE_LOG("skipping writing video frame %lldus (end %lldus) to MediaStream", |
michael@0 | 451 | v->mTime, v->GetEndTime()); |
michael@0 | 452 | } |
michael@0 | 453 | } |
michael@0 | 454 | if (output.GetDuration() > 0) { |
michael@0 | 455 | mediaStream->AppendToTrack(TRACK_VIDEO, &output); |
michael@0 | 456 | } |
michael@0 | 457 | if (VideoQueue().IsFinished() && !stream->mHaveSentFinishVideo) { |
michael@0 | 458 | mediaStream->EndTrack(TRACK_VIDEO); |
michael@0 | 459 | stream->mHaveSentFinishVideo = true; |
michael@0 | 460 | } |
michael@0 | 461 | endPosition = std::max(endPosition, |
michael@0 | 462 | TicksToTimeRoundDown(RATE_VIDEO, stream->mNextVideoTime - stream->mInitialTime)); |
michael@0 | 463 | } |
michael@0 | 464 | |
michael@0 | 465 | if (!stream->mHaveSentFinish) { |
michael@0 | 466 | stream->mStream->AdvanceKnownTracksTime(endPosition); |
michael@0 | 467 | } |
michael@0 | 468 | |
michael@0 | 469 | if (finished && !stream->mHaveSentFinish) { |
michael@0 | 470 | stream->mHaveSentFinish = true; |
michael@0 | 471 | stream->mStream->Finish(); |
michael@0 | 472 | } |
michael@0 | 473 | } |
michael@0 | 474 | |
michael@0 | 475 | if (mAudioCaptured) { |
michael@0 | 476 | // Discard audio packets that are no longer needed. |
michael@0 | 477 | while (true) { |
michael@0 | 478 | const AudioData* a = AudioQueue().PeekFront(); |
michael@0 | 479 | // Packet times are not 100% reliable so this may discard packets that |
michael@0 | 480 | // actually contain data for mCurrentFrameTime. This means if someone might |
michael@0 | 481 | // create a new output stream and we actually don't have the audio for the |
michael@0 | 482 | // very start. That's OK, we'll play silence instead for a brief moment. |
michael@0 | 483 | // That's OK. Seeking to this time would have a similar issue for such |
michael@0 | 484 | // badly muxed resources. |
michael@0 | 485 | if (!a || a->GetEndTime() >= minLastAudioPacketTime) |
michael@0 | 486 | break; |
michael@0 | 487 | mAudioEndTime = std::max(mAudioEndTime, a->GetEndTime()); |
michael@0 | 488 | delete AudioQueue().PopFront(); |
michael@0 | 489 | } |
michael@0 | 490 | |
michael@0 | 491 | if (finished) { |
michael@0 | 492 | mAudioCompleted = true; |
michael@0 | 493 | UpdateReadyState(); |
michael@0 | 494 | } |
michael@0 | 495 | } |
michael@0 | 496 | } |
michael@0 | 497 | |
michael@0 | 498 | MediaDecoderStateMachine::WakeDecoderRunnable* |
michael@0 | 499 | MediaDecoderStateMachine::GetWakeDecoderRunnable() |
michael@0 | 500 | { |
michael@0 | 501 | AssertCurrentThreadInMonitor(); |
michael@0 | 502 | |
michael@0 | 503 | if (!mPendingWakeDecoder.get()) { |
michael@0 | 504 | mPendingWakeDecoder = new WakeDecoderRunnable(this); |
michael@0 | 505 | } |
michael@0 | 506 | return mPendingWakeDecoder.get(); |
michael@0 | 507 | } |
michael@0 | 508 | |
michael@0 | 509 | bool MediaDecoderStateMachine::HaveEnoughDecodedAudio(int64_t aAmpleAudioUSecs) |
michael@0 | 510 | { |
michael@0 | 511 | AssertCurrentThreadInMonitor(); |
michael@0 | 512 | |
michael@0 | 513 | if (AudioQueue().GetSize() == 0 || |
michael@0 | 514 | GetDecodedAudioDuration() < aAmpleAudioUSecs) { |
michael@0 | 515 | return false; |
michael@0 | 516 | } |
michael@0 | 517 | if (!mAudioCaptured) { |
michael@0 | 518 | return true; |
michael@0 | 519 | } |
michael@0 | 520 | |
michael@0 | 521 | DecodedStreamData* stream = mDecoder->GetDecodedStream(); |
michael@0 | 522 | if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishAudio) { |
michael@0 | 523 | if (!stream->mStream->HaveEnoughBuffered(TRACK_AUDIO)) { |
michael@0 | 524 | return false; |
michael@0 | 525 | } |
michael@0 | 526 | stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_AUDIO, |
michael@0 | 527 | GetStateMachineThread(), GetWakeDecoderRunnable()); |
michael@0 | 528 | } |
michael@0 | 529 | |
michael@0 | 530 | return true; |
michael@0 | 531 | } |
michael@0 | 532 | |
michael@0 | 533 | bool MediaDecoderStateMachine::HaveEnoughDecodedVideo() |
michael@0 | 534 | { |
michael@0 | 535 | AssertCurrentThreadInMonitor(); |
michael@0 | 536 | |
michael@0 | 537 | if (static_cast<uint32_t>(VideoQueue().GetSize()) < mAmpleVideoFrames * mPlaybackRate) { |
michael@0 | 538 | return false; |
michael@0 | 539 | } |
michael@0 | 540 | |
michael@0 | 541 | DecodedStreamData* stream = mDecoder->GetDecodedStream(); |
michael@0 | 542 | if (stream && stream->mStreamInitialized && !stream->mHaveSentFinishVideo) { |
michael@0 | 543 | if (!stream->mStream->HaveEnoughBuffered(TRACK_VIDEO)) { |
michael@0 | 544 | return false; |
michael@0 | 545 | } |
michael@0 | 546 | stream->mStream->DispatchWhenNotEnoughBuffered(TRACK_VIDEO, |
michael@0 | 547 | GetStateMachineThread(), GetWakeDecoderRunnable()); |
michael@0 | 548 | } |
michael@0 | 549 | |
michael@0 | 550 | return true; |
michael@0 | 551 | } |
michael@0 | 552 | |
michael@0 | 553 | bool |
michael@0 | 554 | MediaDecoderStateMachine::NeedToDecodeVideo() |
michael@0 | 555 | { |
michael@0 | 556 | AssertCurrentThreadInMonitor(); |
michael@0 | 557 | NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), |
michael@0 | 558 | "Should be on state machine or decode thread."); |
michael@0 | 559 | return mIsVideoDecoding && |
michael@0 | 560 | !mMinimizePreroll && |
michael@0 | 561 | !HaveEnoughDecodedVideo(); |
michael@0 | 562 | } |
michael@0 | 563 | |
// Runs on the decode task queue: decodes one video frame, manages the
// keyframe-skip heuristic, measures decode time to adapt the audio
// buffering thresholds, and re-dispatches decode tasks as needed.
void
MediaDecoderStateMachine::DecodeVideo()
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");

  // Only decode while in the DECODING or BUFFERING states; otherwise clear
  // the dispatch flag so another task can be queued later.
  if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) {
    mDispatchedVideoDecodeTask = false;
    return;
  }
  EnsureActive();

  // We don't want to consider skipping to the next keyframe if we've
  // only just started up the decode loop, so wait until we've decoded
  // some frames before enabling the keyframe skip logic on video.
  if (mIsVideoPrerolling &&
      (static_cast<uint32_t>(VideoQueue().GetSize())
        >= mVideoPrerollFrames * mPlaybackRate))
  {
    mIsVideoPrerolling = false;
  }

  // We'll skip the video decode to the nearest keyframe if we're low on
  // audio, or if we're low on video, provided we're not running low on
  // data to decode. If we're running low on downloaded data to decode,
  // we won't start keyframe skipping, as we'll be pausing playback to buffer
  // soon anyway and we'll want to be able to display frames immediately
  // after buffering finishes.
  if (mState == DECODER_STATE_DECODING &&
      !mSkipToNextKeyFrame &&
      mIsVideoDecoding &&
      ((!mIsAudioPrerolling && mIsAudioDecoding &&
        GetDecodedAudioDuration() < mLowAudioThresholdUsecs * mPlaybackRate) ||
        (!mIsVideoPrerolling && mIsVideoDecoding &&
         // don't skip frame when |clock time| <= |mVideoFrameEndTime| for
         // we are still in the safe range without underrunning video frames
         GetClock() > mVideoFrameEndTime &&
        (static_cast<uint32_t>(VideoQueue().GetSize())
          < LOW_VIDEO_FRAMES * mPlaybackRate))) &&
      !HasLowUndecodedData())
  {
    mSkipToNextKeyFrame = true;
    DECODER_LOG(PR_LOG_DEBUG, "Skipping video decode to the next keyframe");
  }

  // Time the video decode, so that if it's slow, we can increase our low
  // audio threshold to reduce the chance of an audio underrun while we're
  // waiting for a video decode to complete.
  TimeDuration decodeTime;
  {
    int64_t currentTime = GetMediaTime();
    // Drop the decoder monitor while decoding: the decode may take a long
    // time and must not block other threads that need the monitor.
    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
    TimeStamp start = TimeStamp::Now();
    mIsVideoDecoding = mReader->DecodeVideoFrame(mSkipToNextKeyFrame, currentTime);
    decodeTime = TimeStamp::Now() - start;
  }
  if (!mIsVideoDecoding) {
    // Playback ended for this stream, close the sample queue.
    VideoQueue().Finish();
    CheckIfDecodeComplete();
  }

  // If this decode took longer than the current low-audio threshold allows,
  // raise the thresholds (capped at AMPLE_AUDIO_USECS) so more audio is
  // buffered ahead of future slow decodes.
  if (THRESHOLD_FACTOR * DurationToUsecs(decodeTime) > mLowAudioThresholdUsecs &&
      !HasLowUndecodedData())
  {
    mLowAudioThresholdUsecs =
      std::min(THRESHOLD_FACTOR * DurationToUsecs(decodeTime), AMPLE_AUDIO_USECS);
    mAmpleAudioThresholdUsecs = std::max(THRESHOLD_FACTOR * mLowAudioThresholdUsecs,
                                          mAmpleAudioThresholdUsecs);
    DECODER_LOG(PR_LOG_DEBUG, "Slow video decode, set mLowAudioThresholdUsecs=%lld mAmpleAudioThresholdUsecs=%lld",
                mLowAudioThresholdUsecs, mAmpleAudioThresholdUsecs);
  }

  // Forward any newly decoded data to a capturing MediaStream, if present.
  SendStreamData();

  // The ready state can change when we've decoded data, so update the
  // ready state, so that DOM events can fire.
  UpdateReadyState();

  mDispatchedVideoDecodeTask = false;
  DispatchDecodeTasksIfNeeded();
}
michael@0 | 646 | |
michael@0 | 647 | bool |
michael@0 | 648 | MediaDecoderStateMachine::NeedToDecodeAudio() |
michael@0 | 649 | { |
michael@0 | 650 | AssertCurrentThreadInMonitor(); |
michael@0 | 651 | NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), |
michael@0 | 652 | "Should be on state machine or decode thread."); |
michael@0 | 653 | return mIsAudioDecoding && |
michael@0 | 654 | !mMinimizePreroll && |
michael@0 | 655 | !HaveEnoughDecodedAudio(mAmpleAudioThresholdUsecs * mPlaybackRate); |
michael@0 | 656 | } |
michael@0 | 657 | |
// Runs on the decode task queue: decodes one block of audio, ends the
// audio preroll phase once enough is buffered, closes the sample queue at
// end of stream, and re-dispatches decode tasks as needed.
void
MediaDecoderStateMachine::DecodeAudio()
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");

  // Only decode while in the DECODING or BUFFERING states; otherwise clear
  // the dispatch flag so another task can be queued later.
  if (mState != DECODER_STATE_DECODING && mState != DECODER_STATE_BUFFERING) {
    mDispatchedAudioDecodeTask = false;
    return;
  }
  EnsureActive();

  // We don't want to consider skipping to the next keyframe if we've
  // only just started up the decode loop, so wait until we've decoded
  // some audio data before enabling the keyframe skip logic on audio.
  if (mIsAudioPrerolling &&
      GetDecodedAudioDuration() >= mAudioPrerollUsecs * mPlaybackRate) {
    mIsAudioPrerolling = false;
  }

  {
    // Drop the decoder monitor while decoding: the decode may take a long
    // time and must not block other threads that need the monitor.
    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
    mIsAudioDecoding = mReader->DecodeAudioData();
  }
  if (!mIsAudioDecoding) {
    // Playback ended for this stream, close the sample queue.
    AudioQueue().Finish();
    CheckIfDecodeComplete();
  }

  // Forward any newly decoded data to a capturing MediaStream, if present.
  SendStreamData();

  // Notify to ensure that the AudioLoop() is not waiting, in case it was
  // waiting for more audio to be decoded.
  mDecoder->GetReentrantMonitor().NotifyAll();

  // The ready state can change when we've decoded data, so update the
  // ready state, so that DOM events can fire.
  UpdateReadyState();

  mDispatchedAudioDecodeTask = false;
  DispatchDecodeTasksIfNeeded();
}
michael@0 | 701 | |
michael@0 | 702 | void |
michael@0 | 703 | MediaDecoderStateMachine::CheckIfDecodeComplete() |
michael@0 | 704 | { |
michael@0 | 705 | AssertCurrentThreadInMonitor(); |
michael@0 | 706 | if (mState == DECODER_STATE_SHUTDOWN || |
michael@0 | 707 | mState == DECODER_STATE_SEEKING || |
michael@0 | 708 | mState == DECODER_STATE_COMPLETED) { |
michael@0 | 709 | // Don't change our state if we've already been shutdown, or we're seeking, |
michael@0 | 710 | // since we don't want to abort the shutdown or seek processes. |
michael@0 | 711 | return; |
michael@0 | 712 | } |
michael@0 | 713 | MOZ_ASSERT(!AudioQueue().IsFinished() || !mIsAudioDecoding); |
michael@0 | 714 | MOZ_ASSERT(!VideoQueue().IsFinished() || !mIsVideoDecoding); |
michael@0 | 715 | if (!mIsVideoDecoding && !mIsAudioDecoding) { |
michael@0 | 716 | // We've finished decoding all active streams, |
michael@0 | 717 | // so move to COMPLETED state. |
michael@0 | 718 | mState = DECODER_STATE_COMPLETED; |
michael@0 | 719 | DispatchDecodeTasksIfNeeded(); |
michael@0 | 720 | ScheduleStateMachine(); |
michael@0 | 721 | } |
michael@0 | 722 | DECODER_LOG(PR_LOG_DEBUG, "CheckIfDecodeComplete %scompleted", |
michael@0 | 723 | ((mState == DECODER_STATE_COMPLETED) ? "" : "NOT ")); |
michael@0 | 724 | } |
michael@0 | 725 | |
michael@0 | 726 | bool MediaDecoderStateMachine::IsPlaying() |
michael@0 | 727 | { |
michael@0 | 728 | AssertCurrentThreadInMonitor(); |
michael@0 | 729 | |
michael@0 | 730 | return !mPlayStartTime.IsNull(); |
michael@0 | 731 | } |
michael@0 | 732 | |
michael@0 | 733 | // If we have already written enough frames to the AudioStream, start the |
michael@0 | 734 | // playback. |
michael@0 | 735 | static void |
michael@0 | 736 | StartAudioStreamPlaybackIfNeeded(AudioStream* aStream) |
michael@0 | 737 | { |
michael@0 | 738 | // We want to have enough data in the buffer to start the stream. |
michael@0 | 739 | if (static_cast<double>(aStream->GetWritten()) / aStream->GetRate() >= |
michael@0 | 740 | static_cast<double>(AUDIOSTREAM_MIN_WRITE_BEFORE_START_USECS) / USECS_PER_S) { |
michael@0 | 741 | aStream->Start(); |
michael@0 | 742 | } |
michael@0 | 743 | } |
michael@0 | 744 | |
michael@0 | 745 | static void WriteSilence(AudioStream* aStream, uint32_t aFrames) |
michael@0 | 746 | { |
michael@0 | 747 | uint32_t numSamples = aFrames * aStream->GetChannels(); |
michael@0 | 748 | nsAutoTArray<AudioDataValue, 1000> buf; |
michael@0 | 749 | buf.SetLength(numSamples); |
michael@0 | 750 | memset(buf.Elements(), 0, numSamples * sizeof(AudioDataValue)); |
michael@0 | 751 | aStream->Write(buf.Elements(), aFrames); |
michael@0 | 752 | |
michael@0 | 753 | StartAudioStreamPlaybackIfNeeded(aStream); |
michael@0 | 754 | } |
michael@0 | 755 | |
// Entry point of the dedicated audio thread. Initializes the AudioStream,
// then loops: waits while paused/buffering, mirrors volume/playback-rate/
// pitch changes into the stream, and pushes decoded audio (or silence to
// bridge timestamp gaps) to the hardware until shutdown, capture, or end
// of stream. On exit it optionally drains, then shuts the stream down.
void MediaDecoderStateMachine::AudioLoop()
{
  NS_ASSERTION(OnAudioThread(), "Should be on audio thread.");
  DECODER_LOG(PR_LOG_DEBUG, "Begun audio thread/loop");
  int64_t audioDuration = 0;   // Total frames written so far, incl. silence.
  int64_t audioStartTime = -1; // Timestamp (usecs) of the first audio sample.
  uint32_t channels, rate;
  double volume = -1;
  bool setVolume;
  double playbackRate = -1;
  bool setPlaybackRate;
  bool preservesPitch;
  bool setPreservesPitch;
  AudioChannel audioChannel;

  {
    // Snapshot the stream parameters under the decoder monitor.
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mAudioCompleted = false;
    audioStartTime = mAudioStartTime;
    NS_ASSERTION(audioStartTime != -1, "Should have audio start time by now");
    channels = mInfo.mAudio.mChannels;
    rate = mInfo.mAudio.mRate;

    audioChannel = mDecoder->GetAudioChannel();
    volume = mVolume;
    preservesPitch = mPreservesPitch;
    playbackRate = mPlaybackRate;
  }

  {
    // AudioStream initialization can block for extended periods in unusual
    // circumstances, so we take care to drop the decoder monitor while
    // initializing.
    RefPtr<AudioStream> audioStream(new AudioStream());
    audioStream->Init(channels, rate, audioChannel, AudioStream::HighLatency);
    audioStream->SetVolume(volume);
    if (audioStream->SetPreservesPitch(preservesPitch) != NS_OK) {
      NS_WARNING("Setting the pitch preservation failed at AudioLoop start.");
    }
    if (playbackRate != 1.0) {
      NS_ASSERTION(playbackRate != 0,
                   "Don't set the playbackRate to 0 on an AudioStream.");
      if (audioStream->SetPlaybackRate(playbackRate) != NS_OK) {
        NS_WARNING("Setting the playback rate failed at AudioLoop start.");
      }
    }

    {
      // Publish the stream under the monitor so other threads can see it.
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      mAudioStream = audioStream.forget();
    }
  }

  while (1) {
    // Wait while we're not playing, and we're not shutting down, or we're
    // playing and we've got no audio to play.
    {
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      NS_ASSERTION(mState != DECODER_STATE_DECODING_METADATA,
                   "Should have meta data before audio started playing.");
      while (mState != DECODER_STATE_SHUTDOWN &&
             !mStopAudioThread &&
             (!IsPlaying() ||
              mState == DECODER_STATE_BUFFERING ||
              (AudioQueue().GetSize() == 0 &&
               !AudioQueue().AtEndOfStream())))
      {
        if (!IsPlaying() && !mAudioStream->IsPaused()) {
          mAudioStream->Pause();
        }
        mon.Wait();
      }

      // If we're shutting down, break out and exit the audio thread.
      // Also break out if audio is being captured.
      if (mState == DECODER_STATE_SHUTDOWN ||
          mStopAudioThread ||
          AudioQueue().AtEndOfStream())
      {
        break;
      }

      // We only want to go to the expense of changing the volume if
      // the volume has changed.
      setVolume = volume != mVolume;
      volume = mVolume;

      // Same for the playbackRate.
      setPlaybackRate = playbackRate != mPlaybackRate;
      playbackRate = mPlaybackRate;

      // Same for the pitch preservation.
      setPreservesPitch = preservesPitch != mPreservesPitch;
      preservesPitch = mPreservesPitch;

      if (IsPlaying() && mAudioStream->IsPaused()) {
        mAudioStream->Resume();
      }
    }

    // Apply any parameter changes outside the monitor; these calls go into
    // the audio backend and should not be made with the decoder lock held.
    if (setVolume) {
      mAudioStream->SetVolume(volume);
    }
    if (setPlaybackRate) {
      NS_ASSERTION(playbackRate != 0,
                   "Don't set the playbackRate to 0 in the AudioStreams");
      if (mAudioStream->SetPlaybackRate(playbackRate) != NS_OK) {
        NS_WARNING("Setting the playback rate failed in AudioLoop.");
      }
    }
    if (setPreservesPitch) {
      if (mAudioStream->SetPreservesPitch(preservesPitch) != NS_OK) {
        NS_WARNING("Setting the pitch preservation failed in AudioLoop.");
      }
    }
    NS_ASSERTION(AudioQueue().GetSize() > 0,
                 "Should have data to play");
    // See if there's a gap in the audio. If there is, push silence into the
    // audio hardware, so we can play across the gap.
    const AudioData* s = AudioQueue().PeekFront();

    // Calculate the number of frames that have been pushed onto the audio
    // hardware.
    CheckedInt64 playedFrames = UsecsToFrames(audioStartTime, rate) +
                                              audioDuration;
    // Calculate the timestamp of the next chunk of audio in numbers of
    // samples.
    CheckedInt64 sampleTime = UsecsToFrames(s->mTime, rate);
    CheckedInt64 missingFrames = sampleTime - playedFrames;
    if (!missingFrames.isValid() || !sampleTime.isValid()) {
      NS_WARNING("Int overflow adding in AudioLoop()");
      break;
    }

    int64_t framesWritten = 0;
    if (missingFrames.value() > 0) {
      // The next audio chunk begins some time after the end of the last chunk
      // we pushed to the audio hardware. We must push silence into the audio
      // hardware so that the next audio chunk begins playback at the correct
      // time.
      missingFrames = std::min<int64_t>(UINT32_MAX, missingFrames.value());
      VERBOSE_LOG("playing %d frames of silence", int32_t(missingFrames.value()));
      framesWritten = PlaySilence(static_cast<uint32_t>(missingFrames.value()),
                                  channels, playedFrames.value());
    } else {
      framesWritten = PlayFromAudioQueue(sampleTime.value(), channels);
    }
    audioDuration += framesWritten;
    {
      // Publish the new audio end time under the monitor.
      ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
      CheckedInt64 playedUsecs = FramesToUsecs(audioDuration, rate) + audioStartTime;
      if (!playedUsecs.isValid()) {
        NS_WARNING("Int overflow calculating audio end time");
        break;
      }
      mAudioEndTime = playedUsecs.value();
    }
  }
  {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    if (AudioQueue().AtEndOfStream() &&
        mState != DECODER_STATE_SHUTDOWN &&
        !mStopAudioThread)
    {
      // If the media was too short to trigger the start of the audio stream,
      // start it now.
      mAudioStream->Start();
      // Last frame pushed to audio hardware, wait for the audio to finish,
      // before the audio thread terminates.
      bool seeking = false;
      {
        int64_t oldPosition = -1;
        int64_t position = GetMediaTime();
        // Poll in DRAIN_BLOCK_USECS steps until the clock stops advancing
        // (audio has drained), or a seek or shutdown interrupts the wait.
        while (oldPosition != position &&
               mAudioEndTime - position > 0 &&
               mState != DECODER_STATE_SEEKING &&
               mState != DECODER_STATE_SHUTDOWN)
        {
          const int64_t DRAIN_BLOCK_USECS = 100000;
          Wait(std::min(mAudioEndTime - position, DRAIN_BLOCK_USECS));
          oldPosition = position;
          position = GetMediaTime();
        }
        seeking = mState == DECODER_STATE_SEEKING;
      }

      if (!seeking && !mAudioStream->IsPaused()) {
        {
          // Drain blocks until queued audio has played out, so drop the
          // monitor while it runs.
          ReentrantMonitorAutoExit exit(mDecoder->GetReentrantMonitor());
          mAudioStream->Drain();
        }
      }
    }
  }
  DECODER_LOG(PR_LOG_DEBUG, "Reached audio stream end.");
  {
    // Must hold lock while shutting down and anulling the audio stream to prevent
    // state machine thread trying to use it while we're destroying it.
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mAudioStream->Shutdown();
    mAudioStream = nullptr;
    if (!mAudioCaptured) {
      mAudioCompleted = true;
      UpdateReadyState();
      // Kick the decode thread; it may be sleeping waiting for this to finish.
      mDecoder->GetReentrantMonitor().NotifyAll();
    }
  }

  DECODER_LOG(PR_LOG_DEBUG, "Audio stream finished playing, audio thread exit");
}
michael@0 | 967 | |
michael@0 | 968 | uint32_t MediaDecoderStateMachine::PlaySilence(uint32_t aFrames, |
michael@0 | 969 | uint32_t aChannels, |
michael@0 | 970 | uint64_t aFrameOffset) |
michael@0 | 971 | |
michael@0 | 972 | { |
michael@0 | 973 | NS_ASSERTION(OnAudioThread(), "Only call on audio thread."); |
michael@0 | 974 | NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused"); |
michael@0 | 975 | uint32_t maxFrames = SILENCE_BYTES_CHUNK / aChannels / sizeof(AudioDataValue); |
michael@0 | 976 | uint32_t frames = std::min(aFrames, maxFrames); |
michael@0 | 977 | WriteSilence(mAudioStream, frames); |
michael@0 | 978 | return frames; |
michael@0 | 979 | } |
michael@0 | 980 | |
michael@0 | 981 | uint32_t MediaDecoderStateMachine::PlayFromAudioQueue(uint64_t aFrameOffset, |
michael@0 | 982 | uint32_t aChannels) |
michael@0 | 983 | { |
michael@0 | 984 | NS_ASSERTION(OnAudioThread(), "Only call on audio thread."); |
michael@0 | 985 | NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused"); |
michael@0 | 986 | nsAutoPtr<AudioData> audio(AudioQueue().PopFront()); |
michael@0 | 987 | { |
michael@0 | 988 | ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); |
michael@0 | 989 | NS_WARN_IF_FALSE(IsPlaying(), "Should be playing"); |
michael@0 | 990 | // Awaken the decode loop if it's waiting for space to free up in the |
michael@0 | 991 | // audio queue. |
michael@0 | 992 | mDecoder->GetReentrantMonitor().NotifyAll(); |
michael@0 | 993 | } |
michael@0 | 994 | int64_t offset = -1; |
michael@0 | 995 | uint32_t frames = 0; |
michael@0 | 996 | VERBOSE_LOG("playing %d frames of data to stream for AudioData at %lld", |
michael@0 | 997 | audio->mFrames, audio->mTime); |
michael@0 | 998 | mAudioStream->Write(audio->mAudioData, |
michael@0 | 999 | audio->mFrames); |
michael@0 | 1000 | |
michael@0 | 1001 | aChannels = mAudioStream->GetOutChannels(); |
michael@0 | 1002 | |
michael@0 | 1003 | StartAudioStreamPlaybackIfNeeded(mAudioStream); |
michael@0 | 1004 | |
michael@0 | 1005 | offset = audio->mOffset; |
michael@0 | 1006 | frames = audio->mFrames; |
michael@0 | 1007 | |
michael@0 | 1008 | if (offset != -1) { |
michael@0 | 1009 | mDecoder->UpdatePlaybackOffset(offset); |
michael@0 | 1010 | } |
michael@0 | 1011 | return frames; |
michael@0 | 1012 | } |
michael@0 | 1013 | |
michael@0 | 1014 | nsresult MediaDecoderStateMachine::Init(MediaDecoderStateMachine* aCloneDonor) |
michael@0 | 1015 | { |
michael@0 | 1016 | MOZ_ASSERT(NS_IsMainThread()); |
michael@0 | 1017 | |
michael@0 | 1018 | RefPtr<SharedThreadPool> decodePool( |
michael@0 | 1019 | SharedThreadPool::Get(NS_LITERAL_CSTRING("Media Decode"), |
michael@0 | 1020 | Preferences::GetUint("media.num-decode-threads", 25))); |
michael@0 | 1021 | NS_ENSURE_TRUE(decodePool, NS_ERROR_FAILURE); |
michael@0 | 1022 | |
michael@0 | 1023 | RefPtr<SharedThreadPool> stateMachinePool( |
michael@0 | 1024 | SharedThreadPool::Get(NS_LITERAL_CSTRING("Media State Machine"), 1)); |
michael@0 | 1025 | NS_ENSURE_TRUE(stateMachinePool, NS_ERROR_FAILURE); |
michael@0 | 1026 | |
michael@0 | 1027 | mDecodeTaskQueue = new MediaTaskQueue(decodePool.forget()); |
michael@0 | 1028 | NS_ENSURE_TRUE(mDecodeTaskQueue, NS_ERROR_FAILURE); |
michael@0 | 1029 | |
michael@0 | 1030 | MediaDecoderReader* cloneReader = nullptr; |
michael@0 | 1031 | if (aCloneDonor) { |
michael@0 | 1032 | cloneReader = aCloneDonor->mReader; |
michael@0 | 1033 | } |
michael@0 | 1034 | |
michael@0 | 1035 | mStateMachineThreadPool = stateMachinePool; |
michael@0 | 1036 | |
michael@0 | 1037 | nsresult rv; |
michael@0 | 1038 | mTimer = do_CreateInstance("@mozilla.org/timer;1", &rv); |
michael@0 | 1039 | NS_ENSURE_SUCCESS(rv, rv); |
michael@0 | 1040 | rv = mTimer->SetTarget(GetStateMachineThread()); |
michael@0 | 1041 | NS_ENSURE_SUCCESS(rv, rv); |
michael@0 | 1042 | |
michael@0 | 1043 | return mReader->Init(cloneReader); |
michael@0 | 1044 | } |
michael@0 | 1045 | |
// Halts playback: folds elapsed play time into mPlayDuration, clears the
// play start time (making IsPlaying() false), and wakes the audio thread
// so it can pause the AudioStream.
void MediaDecoderStateMachine::StopPlayback()
{
  DECODER_LOG(PR_LOG_DEBUG, "StopPlayback()");

  AssertCurrentThreadInMonitor();

  mDecoder->NotifyPlaybackStopped();

  if (IsPlaying()) {
    // Record how far we played; a null mPlayStartTime is what makes
    // IsPlaying() report false from here on.
    mPlayDuration = GetClock();
    mPlayStartTime = TimeStamp();
  }
  // Notify the audio thread, so that it notices that we've stopped playing,
  // so it can pause audio playback.
  mDecoder->GetReentrantMonitor().NotifyAll();
  NS_ASSERTION(!IsPlaying(), "Should report not playing at end of StopPlayback()");
  mDecoder->UpdateStreamBlockingForStateMachinePlaying();

  DispatchDecodeTasksIfNeeded();
}
michael@0 | 1066 | |
michael@0 | 1067 | void MediaDecoderStateMachine::SetSyncPointForMediaStream() |
michael@0 | 1068 | { |
michael@0 | 1069 | AssertCurrentThreadInMonitor(); |
michael@0 | 1070 | |
michael@0 | 1071 | DecodedStreamData* stream = mDecoder->GetDecodedStream(); |
michael@0 | 1072 | if (!stream) { |
michael@0 | 1073 | return; |
michael@0 | 1074 | } |
michael@0 | 1075 | |
michael@0 | 1076 | mSyncPointInMediaStream = stream->GetLastOutputTime(); |
michael@0 | 1077 | mSyncPointInDecodedStream = mStartTime + mPlayDuration; |
michael@0 | 1078 | } |
michael@0 | 1079 | |
michael@0 | 1080 | int64_t MediaDecoderStateMachine::GetCurrentTimeViaMediaStreamSync() |
michael@0 | 1081 | { |
michael@0 | 1082 | AssertCurrentThreadInMonitor(); |
michael@0 | 1083 | NS_ASSERTION(mSyncPointInDecodedStream >= 0, "Should have set up sync point"); |
michael@0 | 1084 | DecodedStreamData* stream = mDecoder->GetDecodedStream(); |
michael@0 | 1085 | StreamTime streamDelta = stream->GetLastOutputTime() - mSyncPointInMediaStream; |
michael@0 | 1086 | return mSyncPointInDecodedStream + MediaTimeToMicroseconds(streamDelta); |
michael@0 | 1087 | } |
michael@0 | 1088 | |
// Begins playback: records the play start time (making IsPlaying() true),
// starts the audio thread, and wakes any threads waiting on the monitor.
void MediaDecoderStateMachine::StartPlayback()
{
  DECODER_LOG(PR_LOG_DEBUG, "StartPlayback()");

  NS_ASSERTION(!IsPlaying(), "Shouldn't be playing when StartPlayback() is called");
  AssertCurrentThreadInMonitor();

  mDecoder->NotifyPlaybackStarted();
  // A non-null mPlayStartTime is what makes IsPlaying() report true.
  mPlayStartTime = TimeStamp::Now();

  NS_ASSERTION(IsPlaying(), "Should report playing by end of StartPlayback()");
  if (NS_FAILED(StartAudioThread())) {
    NS_WARNING("Failed to create audio thread");
  }
  mDecoder->GetReentrantMonitor().NotifyAll();
  mDecoder->UpdateStreamBlockingForStateMachinePlaying();
}
michael@0 | 1106 | |
// Updates mCurrentFrameTime from an absolute media time (usecs). If the
// new time runs past the known end time, extends the duration and asks the
// main thread to fire a durationchange event.
void MediaDecoderStateMachine::UpdatePlaybackPositionInternal(int64_t aTime)
{
  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
               "Should be on state machine thread.");
  AssertCurrentThreadInMonitor();

  NS_ASSERTION(mStartTime >= 0, "Should have positive mStartTime");
  mCurrentFrameTime = aTime - mStartTime;
  NS_ASSERTION(mCurrentFrameTime >= 0, "CurrentTime should be positive!");
  if (aTime > mEndTime) {
    NS_ASSERTION(mCurrentFrameTime > GetDuration(),
                 "CurrentTime must be after duration if aTime > endTime!");
    // Playback ran past the advertised end; grow the duration and notify.
    mEndTime = aTime;
    nsCOMPtr<nsIRunnable> event =
      NS_NewRunnableMethod(mDecoder, &MediaDecoder::DurationChanged);
    NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
  }
}
michael@0 | 1125 | |
// Updates the playback position, queues a position-changed notification
// for the main thread (coalesced via mPositionChangeQueued), dispatches
// any timed metadata, and stops playback at a media fragment's end time.
void MediaDecoderStateMachine::UpdatePlaybackPosition(int64_t aTime)
{
  UpdatePlaybackPositionInternal(aTime);

  // mFragmentEndTime < 0 means no media-fragment end is set.
  bool fragmentEnded = mFragmentEndTime >= 0 && GetMediaTime() >= mFragmentEndTime;
  if (!mPositionChangeQueued || fragmentEnded) {
    mPositionChangeQueued = true;
    nsCOMPtr<nsIRunnable> event =
      NS_NewRunnableMethod(mDecoder, &MediaDecoder::PlaybackPositionChanged);
    NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
  }

  mMetadataManager.DispatchMetadataIfNeeded(mDecoder, aTime);

  if (fragmentEnded) {
    StopPlayback();
  }
}
michael@0 | 1144 | |
// Called on the main thread once a queued position-change notification has
// been processed, allowing UpdatePlaybackPosition() to queue another.
void MediaDecoderStateMachine::ClearPositionChangeFlag()
{
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  AssertCurrentThreadInMonitor();

  mPositionChangeQueued = false;
}
michael@0 | 1152 | |
michael@0 | 1153 | MediaDecoderOwner::NextFrameStatus MediaDecoderStateMachine::GetNextFrameStatus() |
michael@0 | 1154 | { |
michael@0 | 1155 | ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); |
michael@0 | 1156 | if (IsBuffering() || IsSeeking()) { |
michael@0 | 1157 | return MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING; |
michael@0 | 1158 | } else if (HaveNextFrameData()) { |
michael@0 | 1159 | return MediaDecoderOwner::NEXT_FRAME_AVAILABLE; |
michael@0 | 1160 | } |
michael@0 | 1161 | return MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE; |
michael@0 | 1162 | } |
michael@0 | 1163 | |
// Records the requested volume under the monitor; the audio thread picks
// the change up on its next loop iteration and applies it to the stream.
void MediaDecoderStateMachine::SetVolume(double volume)
{
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  mVolume = volume;
}
michael@0 | 1170 | |
// Marks whether audio output is being captured into a MediaStream. When
// capture is newly enabled while the audio thread is still running, the
// state machine is scheduled so it can stop that thread.
void MediaDecoderStateMachine::SetAudioCaptured(bool aCaptured)
{
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  if (!mAudioCaptured && aCaptured && !mStopAudioThread) {
    // Make sure the state machine runs as soon as possible. That will
    // stop the audio thread.
    // If mStopAudioThread is true then we're already stopping the audio thread
    // and since we set mAudioCaptured to true, nothing can start it again.
    ScheduleStateMachine();
  }
  mAudioCaptured = aCaptured;
}
michael@0 | 1184 | |
michael@0 | 1185 | double MediaDecoderStateMachine::GetCurrentTime() const |
michael@0 | 1186 | { |
michael@0 | 1187 | NS_ASSERTION(NS_IsMainThread() || |
michael@0 | 1188 | OnStateMachineThread() || |
michael@0 | 1189 | OnDecodeThread(), |
michael@0 | 1190 | "Should be on main, decode, or state machine thread."); |
michael@0 | 1191 | |
michael@0 | 1192 | return static_cast<double>(mCurrentFrameTime) / static_cast<double>(USECS_PER_S); |
michael@0 | 1193 | } |
michael@0 | 1194 | |
michael@0 | 1195 | int64_t MediaDecoderStateMachine::GetDuration() |
michael@0 | 1196 | { |
michael@0 | 1197 | AssertCurrentThreadInMonitor(); |
michael@0 | 1198 | |
michael@0 | 1199 | if (mEndTime == -1 || mStartTime == -1) |
michael@0 | 1200 | return -1; |
michael@0 | 1201 | return mEndTime - mStartTime; |
michael@0 | 1202 | } |
michael@0 | 1203 | |
michael@0 | 1204 | void MediaDecoderStateMachine::SetDuration(int64_t aDuration) |
michael@0 | 1205 | { |
michael@0 | 1206 | NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(), |
michael@0 | 1207 | "Should be on main or decode thread."); |
michael@0 | 1208 | AssertCurrentThreadInMonitor(); |
michael@0 | 1209 | |
michael@0 | 1210 | if (aDuration == -1) { |
michael@0 | 1211 | return; |
michael@0 | 1212 | } |
michael@0 | 1213 | |
michael@0 | 1214 | if (mStartTime != -1) { |
michael@0 | 1215 | mEndTime = mStartTime + aDuration; |
michael@0 | 1216 | } else { |
michael@0 | 1217 | mStartTime = 0; |
michael@0 | 1218 | mEndTime = aDuration; |
michael@0 | 1219 | } |
michael@0 | 1220 | } |
michael@0 | 1221 | |
michael@0 | 1222 | void MediaDecoderStateMachine::UpdateEstimatedDuration(int64_t aDuration) |
michael@0 | 1223 | { |
michael@0 | 1224 | AssertCurrentThreadInMonitor(); |
michael@0 | 1225 | int64_t duration = GetDuration(); |
michael@0 | 1226 | if (aDuration != duration && |
michael@0 | 1227 | abs(aDuration - duration) > ESTIMATED_DURATION_FUZZ_FACTOR_USECS) { |
michael@0 | 1228 | SetDuration(aDuration); |
michael@0 | 1229 | nsCOMPtr<nsIRunnable> event = |
michael@0 | 1230 | NS_NewRunnableMethod(mDecoder, &MediaDecoder::DurationChanged); |
michael@0 | 1231 | NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL); |
michael@0 | 1232 | } |
michael@0 | 1233 | } |
michael@0 | 1234 | |
void MediaDecoderStateMachine::SetMediaEndTime(int64_t aEndTime)
{
  // Record the absolute end time of the media, in microseconds.
  // Decode thread only, under the decoder monitor.
  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread");
  AssertCurrentThreadInMonitor();

  mEndTime = aEndTime;
}
michael@0 | 1242 | |
michael@0 | 1243 | void MediaDecoderStateMachine::SetFragmentEndTime(int64_t aEndTime) |
michael@0 | 1244 | { |
michael@0 | 1245 | AssertCurrentThreadInMonitor(); |
michael@0 | 1246 | |
michael@0 | 1247 | mFragmentEndTime = aEndTime < 0 ? aEndTime : aEndTime + mStartTime; |
michael@0 | 1248 | } |
michael@0 | 1249 | |
void MediaDecoderStateMachine::SetTransportSeekable(bool aTransportSeekable)
{
  // Record whether the transport layer supports seeking. Written under
  // the decoder monitor.
  NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(),
               "Should be on main thread or the decoder thread.");
  AssertCurrentThreadInMonitor();

  mTransportSeekable = aTransportSeekable;
}
michael@0 | 1258 | |
void MediaDecoderStateMachine::SetMediaSeekable(bool aMediaSeekable)
{
  // Record whether the media itself supports seeking; checked by Seek().
  NS_ASSERTION(NS_IsMainThread() || OnDecodeThread(),
               "Should be on main thread or the decoder thread.");

  // NOTE(review): unlike SetTransportSeekable(), there is no
  // AssertCurrentThreadInMonitor() here — confirm whether mMediaSeekable
  // is intentionally written without holding the decoder monitor.
  mMediaSeekable = aMediaSeekable;
}
michael@0 | 1266 | |
bool MediaDecoderStateMachine::IsDormantNeeded()
{
  // Forward the question to the reader; it decides whether dormant mode
  // is needed for its resources.
  return mReader->IsDormantNeeded();
}
michael@0 | 1271 | |
michael@0 | 1272 | void MediaDecoderStateMachine::SetDormant(bool aDormant) |
michael@0 | 1273 | { |
michael@0 | 1274 | NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); |
michael@0 | 1275 | AssertCurrentThreadInMonitor(); |
michael@0 | 1276 | |
michael@0 | 1277 | if (!mReader) { |
michael@0 | 1278 | return; |
michael@0 | 1279 | } |
michael@0 | 1280 | |
michael@0 | 1281 | if (aDormant) { |
michael@0 | 1282 | ScheduleStateMachine(); |
michael@0 | 1283 | mState = DECODER_STATE_DORMANT; |
michael@0 | 1284 | mDecoder->GetReentrantMonitor().NotifyAll(); |
michael@0 | 1285 | } else if ((aDormant != true) && (mState == DECODER_STATE_DORMANT)) { |
michael@0 | 1286 | ScheduleStateMachine(); |
michael@0 | 1287 | mStartTime = 0; |
michael@0 | 1288 | mCurrentFrameTime = 0; |
michael@0 | 1289 | mState = DECODER_STATE_DECODING_METADATA; |
michael@0 | 1290 | mDecoder->GetReentrantMonitor().NotifyAll(); |
michael@0 | 1291 | } |
michael@0 | 1292 | } |
michael@0 | 1293 | |
void MediaDecoderStateMachine::Shutdown()
{
  // Begin irreversible teardown of the state machine. Main thread only.
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");

  // Once we've entered the shutdown state here there's no going back.
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());

  // Change state before issuing shutdown request to threads so those
  // threads can start exiting cleanly during the Shutdown call.
  DECODER_LOG(PR_LOG_DEBUG, "Changed state to SHUTDOWN");
  ScheduleStateMachine();
  mState = DECODER_STATE_SHUTDOWN;
  // Wake any thread blocked on the monitor so it can observe SHUTDOWN.
  mDecoder->GetReentrantMonitor().NotifyAll();
}
michael@0 | 1308 | |
void MediaDecoderStateMachine::StartDecoding()
{
  // Transition into DECODER_STATE_DECODING and kick off decode tasks.
  // No-op if we're already decoding.
  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
               "Should be on state machine or decode thread.");
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  if (mState == DECODER_STATE_DECODING) {
    return;
  }
  mState = DECODER_STATE_DECODING;

  // Remember when this decode run began.
  mDecodeStartTime = TimeStamp::Now();

  // Reset our "stream finished decoding" flags, so we try to decode all
  // streams that we have when we start decoding.
  mIsVideoDecoding = HasVideo() && !VideoQueue().IsFinished();
  mIsAudioDecoding = HasAudio() && !AudioQueue().IsFinished();

  // Both streams may already be drained; CheckIfDecodeComplete() can move
  // us straight to COMPLETED, in which case there's nothing left to do.
  CheckIfDecodeComplete();
  if (mState == DECODER_STATE_COMPLETED) {
    return;
  }

  // Reset other state to pristine values before starting decode.
  mSkipToNextKeyFrame = false;
  mIsAudioPrerolling = true;
  mIsVideoPrerolling = true;

  // Ensure that we've got tasks enqueued to decode data if we need to.
  DispatchDecodeTasksIfNeeded();

  ScheduleStateMachine();
}
michael@0 | 1341 | |
void MediaDecoderStateMachine::StartWaitForResources()
{
  // Park the state machine until the reader's media resources become
  // available; see NotifyWaitingForResourcesStatusChanged().
  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
               "Should be on state machine or decode thread.");
  AssertCurrentThreadInMonitor();
  mState = DECODER_STATE_WAIT_FOR_RESOURCES;
}
michael@0 | 1349 | |
michael@0 | 1350 | void MediaDecoderStateMachine::NotifyWaitingForResourcesStatusChanged() |
michael@0 | 1351 | { |
michael@0 | 1352 | AssertCurrentThreadInMonitor(); |
michael@0 | 1353 | if (mState != DECODER_STATE_WAIT_FOR_RESOURCES || |
michael@0 | 1354 | mReader->IsWaitingMediaResources()) { |
michael@0 | 1355 | return; |
michael@0 | 1356 | } |
michael@0 | 1357 | // The reader is no longer waiting for resources (say a hardware decoder), |
michael@0 | 1358 | // we can now proceed to decode metadata. |
michael@0 | 1359 | mState = DECODER_STATE_DECODING_METADATA; |
michael@0 | 1360 | EnqueueDecodeMetadataTask(); |
michael@0 | 1361 | } |
michael@0 | 1362 | |
void MediaDecoderStateMachine::Play()
{
  // Kick the state machine so playback can start; if we were buffering,
  // move straight to DECODING. Main thread only.
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  // When asked to play, switch to decoding state only if
  // we are currently buffering. In other cases, we'll start playing anyway
  // when the state machine notices the decoder's state change to PLAYING.
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  if (mState == DECODER_STATE_BUFFERING) {
    DECODER_LOG(PR_LOG_DEBUG, "Changed state from BUFFERING to DECODING");
    mState = DECODER_STATE_DECODING;
    mDecodeStartTime = TimeStamp::Now();
  }
  // Once we start playing, we don't want to minimize our prerolling, as we
  // assume the user is likely to want to keep playing in future.
  mMinimizePreroll = false;
  ScheduleStateMachine();
}
michael@0 | 1380 | |
michael@0 | 1381 | void MediaDecoderStateMachine::ResetPlayback() |
michael@0 | 1382 | { |
michael@0 | 1383 | NS_ASSERTION(OnDecodeThread(), "Should be on decode thread."); |
michael@0 | 1384 | mVideoFrameEndTime = -1; |
michael@0 | 1385 | mAudioStartTime = -1; |
michael@0 | 1386 | mAudioEndTime = -1; |
michael@0 | 1387 | mAudioCompleted = false; |
michael@0 | 1388 | } |
michael@0 | 1389 | |
michael@0 | 1390 | void MediaDecoderStateMachine::NotifyDataArrived(const char* aBuffer, |
michael@0 | 1391 | uint32_t aLength, |
michael@0 | 1392 | int64_t aOffset) |
michael@0 | 1393 | { |
michael@0 | 1394 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1395 | mReader->NotifyDataArrived(aBuffer, aLength, aOffset); |
michael@0 | 1396 | |
michael@0 | 1397 | // While playing an unseekable stream of unknown duration, mEndTime is |
michael@0 | 1398 | // updated (in AdvanceFrame()) as we play. But if data is being downloaded |
michael@0 | 1399 | // faster than played, mEndTime won't reflect the end of playable data |
michael@0 | 1400 | // since we haven't played the frame at the end of buffered data. So update |
michael@0 | 1401 | // mEndTime here as new data is downloaded to prevent such a lag. |
michael@0 | 1402 | dom::TimeRanges buffered; |
michael@0 | 1403 | if (mDecoder->IsInfinite() && |
michael@0 | 1404 | NS_SUCCEEDED(mDecoder->GetBuffered(&buffered))) |
michael@0 | 1405 | { |
michael@0 | 1406 | uint32_t length = 0; |
michael@0 | 1407 | buffered.GetLength(&length); |
michael@0 | 1408 | if (length) { |
michael@0 | 1409 | double end = 0; |
michael@0 | 1410 | buffered.End(length - 1, &end); |
michael@0 | 1411 | ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); |
michael@0 | 1412 | mEndTime = std::max<int64_t>(mEndTime, end * USECS_PER_S); |
michael@0 | 1413 | } |
michael@0 | 1414 | } |
michael@0 | 1415 | } |
michael@0 | 1416 | |
void MediaDecoderStateMachine::Seek(const SeekTarget& aTarget)
{
  // Record a new seek target and move to DECODER_STATE_SEEKING; the actual
  // seek is performed later on the decode thread. Main thread only.
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());

  // We need to be able to seek both at a transport level and at a media level
  // to seek.
  if (!mMediaSeekable) {
    return;
  }
  // MediaDecoder::mPlayState should be SEEKING while we seek, and
  // in that case MediaDecoder shouldn't be calling us.
  NS_ASSERTION(mState != DECODER_STATE_SEEKING,
               "We shouldn't already be seeking");
  NS_ASSERTION(mState >= DECODER_STATE_DECODING,
               "We should have loaded metadata");

  // Bound the seek time to be inside the media range.
  NS_ASSERTION(mStartTime != -1, "Should know start time by now");
  NS_ASSERTION(mEndTime != -1, "Should know end time by now");
  // aTarget.mTime is media-relative; rebase onto the absolute
  // [mStartTime, mEndTime] range and clamp.
  int64_t seekTime = aTarget.mTime + mStartTime;
  seekTime = std::min(seekTime, mEndTime);
  seekTime = std::max(mStartTime, seekTime);
  NS_ASSERTION(seekTime >= mStartTime && seekTime <= mEndTime,
               "Can only seek in range [0,duration]");
  mSeekTarget = SeekTarget(seekTime, aTarget.mType);

  // mBasePosition is the media-relative position being seeked to.
  mBasePosition = seekTime - mStartTime;
  DECODER_LOG(PR_LOG_DEBUG, "Changed state to SEEKING (to %lld)", mSeekTarget.mTime);
  mState = DECODER_STATE_SEEKING;
  if (mDecoder->GetDecodedStream()) {
    // When output is captured into a MediaStream, rebuild the stream at
    // the new (media-relative) position.
    mDecoder->RecreateDecodedStream(seekTime - mStartTime);
  }
  ScheduleStateMachine();
}
michael@0 | 1452 | |
void MediaDecoderStateMachine::StopAudioThread()
{
  // Request and wait for the audio playback thread to stop.
  NS_ASSERTION(OnDecodeThread() ||
               OnStateMachineThread(), "Should be on decode thread or state machine thread");
  AssertCurrentThreadInMonitor();

  if (mStopAudioThread) {
    // Nothing to do, since the thread is already stopping
    return;
  }

  mStopAudioThread = true;
  // Wake any thread waiting on the monitor so it can observe mStopAudioThread.
  mDecoder->GetReentrantMonitor().NotifyAll();
  if (mAudioThread) {
    DECODER_LOG(PR_LOG_DEBUG, "Shutdown audio thread");
    {
      // Exit the monitor for the duration of the join — presumably so the
      // audio loop can take the monitor while winding down; confirm against
      // AudioLoop().
      ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
      mAudioThread->Shutdown();
    }
    mAudioThread = nullptr;
    // Now that the audio thread is dead, try sending data to our MediaStream(s).
    // That may have been waiting for the audio thread to stop.
    SendStreamData();
  }
}
michael@0 | 1478 | |
michael@0 | 1479 | nsresult |
michael@0 | 1480 | MediaDecoderStateMachine::EnqueueDecodeMetadataTask() |
michael@0 | 1481 | { |
michael@0 | 1482 | AssertCurrentThreadInMonitor(); |
michael@0 | 1483 | |
michael@0 | 1484 | if (mState != DECODER_STATE_DECODING_METADATA) { |
michael@0 | 1485 | return NS_OK; |
michael@0 | 1486 | } |
michael@0 | 1487 | nsresult rv = mDecodeTaskQueue->Dispatch( |
michael@0 | 1488 | NS_NewRunnableMethod(this, &MediaDecoderStateMachine::CallDecodeMetadata)); |
michael@0 | 1489 | NS_ENSURE_SUCCESS(rv, rv); |
michael@0 | 1490 | |
michael@0 | 1491 | return NS_OK; |
michael@0 | 1492 | } |
michael@0 | 1493 | |
michael@0 | 1494 | void |
michael@0 | 1495 | MediaDecoderStateMachine::EnsureActive() |
michael@0 | 1496 | { |
michael@0 | 1497 | AssertCurrentThreadInMonitor(); |
michael@0 | 1498 | MOZ_ASSERT(OnDecodeThread()); |
michael@0 | 1499 | if (!mIsReaderIdle) { |
michael@0 | 1500 | return; |
michael@0 | 1501 | } |
michael@0 | 1502 | mIsReaderIdle = false; |
michael@0 | 1503 | { |
michael@0 | 1504 | ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); |
michael@0 | 1505 | SetReaderActive(); |
michael@0 | 1506 | } |
michael@0 | 1507 | } |
michael@0 | 1508 | |
void
MediaDecoderStateMachine::SetReaderIdle()
{
  // Tell the reader it may go idle; runs on the decode task queue
  // (dispatched from DispatchDecodeTasksIfNeeded()).
#ifdef PR_LOGGING
  {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    DECODER_LOG(PR_LOG_DEBUG, "SetReaderIdle() audioQueue=%lld videoQueue=%lld",
                GetDecodedAudioDuration(),
                VideoQueue().Duration());
  }
#endif
  MOZ_ASSERT(OnDecodeThread());
  mReader->SetIdle();
}
michael@0 | 1523 | |
void
MediaDecoderStateMachine::SetReaderActive()
{
  // Counterpart of SetReaderIdle(): tell the reader decoding is resuming.
  DECODER_LOG(PR_LOG_DEBUG, "SetReaderActive()");
  MOZ_ASSERT(OnDecodeThread());
  mReader->SetActive();
}
michael@0 | 1531 | |
void
MediaDecoderStateMachine::DispatchDecodeTasksIfNeeded()
{
  // Queue audio/video decode tasks as needed, and toggle the reader's
  // idle/active state to match whether any decoding or playback is pending.
  AssertCurrentThreadInMonitor();

  // NeedToDecodeAudio() can go from false to true while we hold the
  // monitor, but it can't go from true to false. This can happen because
  // NeedToDecodeAudio() takes into account the amount of decoded audio
  // that's been written to the AudioStream but not played yet. So if we
  // were calling NeedToDecodeAudio() twice and we thread-context switch
  // between the calls, audio can play, which can affect the return value
  // of NeedToDecodeAudio() giving inconsistent results. So we cache the
  // value returned by NeedToDecodeAudio(), and make decisions
  // based on the cached value. If NeedToDecodeAudio() has
  // returned false, and then subsequently returns true and we're not
  // playing, it will probably be OK since we don't need to consume data
  // anyway.

  const bool needToDecodeAudio = NeedToDecodeAudio();
  const bool needToDecodeVideo = NeedToDecodeVideo();

  // If we're in completed state, we should not need to decode anything else.
  MOZ_ASSERT(mState != DECODER_STATE_COMPLETED ||
             (!needToDecodeAudio && !needToDecodeVideo));

  // The reader can go idle only when nothing is playing, seeking, or
  // waiting to be decoded.
  bool needIdle = !mDecoder->IsLogicallyPlaying() &&
                  mState != DECODER_STATE_SEEKING &&
                  !needToDecodeAudio &&
                  !needToDecodeVideo &&
                  !IsPlaying();

  if (needToDecodeAudio) {
    EnsureAudioDecodeTaskQueued();
  }
  if (needToDecodeVideo) {
    EnsureVideoDecodeTaskQueued();
  }

  // Only notify the reader when its idle state actually changes.
  if (mIsReaderIdle == needIdle) {
    return;
  }
  mIsReaderIdle = needIdle;
  RefPtr<nsIRunnable> event;
  if (mIsReaderIdle) {
    event = NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SetReaderIdle);
  } else {
    event = NS_NewRunnableMethod(this, &MediaDecoderStateMachine::SetReaderActive);
  }
  // Dispatch failure is not warned about in SHUTDOWN state — presumably
  // the task queue is already being torn down then.
  if (NS_FAILED(mDecodeTaskQueue->Dispatch(event.forget())) &&
      mState != DECODER_STATE_SHUTDOWN) {
    NS_WARNING("Failed to dispatch event to set decoder idle state");
  }
}
michael@0 | 1585 | |
michael@0 | 1586 | nsresult |
michael@0 | 1587 | MediaDecoderStateMachine::EnqueueDecodeSeekTask() |
michael@0 | 1588 | { |
michael@0 | 1589 | NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), |
michael@0 | 1590 | "Should be on state machine or decode thread."); |
michael@0 | 1591 | AssertCurrentThreadInMonitor(); |
michael@0 | 1592 | |
michael@0 | 1593 | if (mState != DECODER_STATE_SEEKING) { |
michael@0 | 1594 | return NS_OK; |
michael@0 | 1595 | } |
michael@0 | 1596 | nsresult rv = mDecodeTaskQueue->Dispatch( |
michael@0 | 1597 | NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeSeek)); |
michael@0 | 1598 | NS_ENSURE_SUCCESS(rv, rv); |
michael@0 | 1599 | |
michael@0 | 1600 | return NS_OK; |
michael@0 | 1601 | } |
michael@0 | 1602 | |
michael@0 | 1603 | nsresult |
michael@0 | 1604 | MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded() |
michael@0 | 1605 | { |
michael@0 | 1606 | ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); |
michael@0 | 1607 | NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), |
michael@0 | 1608 | "Should be on state machine or decode thread."); |
michael@0 | 1609 | |
michael@0 | 1610 | if (NeedToDecodeAudio()) { |
michael@0 | 1611 | return EnsureAudioDecodeTaskQueued(); |
michael@0 | 1612 | } |
michael@0 | 1613 | |
michael@0 | 1614 | return NS_OK; |
michael@0 | 1615 | } |
michael@0 | 1616 | |
michael@0 | 1617 | nsresult |
michael@0 | 1618 | MediaDecoderStateMachine::EnsureAudioDecodeTaskQueued() |
michael@0 | 1619 | { |
michael@0 | 1620 | ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); |
michael@0 | 1621 | NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), |
michael@0 | 1622 | "Should be on state machine or decode thread."); |
michael@0 | 1623 | |
michael@0 | 1624 | if (mState >= DECODER_STATE_COMPLETED) { |
michael@0 | 1625 | return NS_OK; |
michael@0 | 1626 | } |
michael@0 | 1627 | |
michael@0 | 1628 | MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA); |
michael@0 | 1629 | |
michael@0 | 1630 | if (mIsAudioDecoding && !mDispatchedAudioDecodeTask) { |
michael@0 | 1631 | nsresult rv = mDecodeTaskQueue->Dispatch( |
michael@0 | 1632 | NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeAudio)); |
michael@0 | 1633 | if (NS_SUCCEEDED(rv)) { |
michael@0 | 1634 | mDispatchedAudioDecodeTask = true; |
michael@0 | 1635 | } else { |
michael@0 | 1636 | NS_WARNING("Failed to dispatch task to decode audio"); |
michael@0 | 1637 | } |
michael@0 | 1638 | } |
michael@0 | 1639 | |
michael@0 | 1640 | return NS_OK; |
michael@0 | 1641 | } |
michael@0 | 1642 | |
michael@0 | 1643 | nsresult |
michael@0 | 1644 | MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded() |
michael@0 | 1645 | { |
michael@0 | 1646 | ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); |
michael@0 | 1647 | NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), |
michael@0 | 1648 | "Should be on state machine or decode thread."); |
michael@0 | 1649 | |
michael@0 | 1650 | if (NeedToDecodeVideo()) { |
michael@0 | 1651 | return EnsureVideoDecodeTaskQueued(); |
michael@0 | 1652 | } |
michael@0 | 1653 | |
michael@0 | 1654 | return NS_OK; |
michael@0 | 1655 | } |
michael@0 | 1656 | |
michael@0 | 1657 | nsresult |
michael@0 | 1658 | MediaDecoderStateMachine::EnsureVideoDecodeTaskQueued() |
michael@0 | 1659 | { |
michael@0 | 1660 | ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); |
michael@0 | 1661 | NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), |
michael@0 | 1662 | "Should be on state machine or decode thread."); |
michael@0 | 1663 | |
michael@0 | 1664 | if (mState >= DECODER_STATE_COMPLETED) { |
michael@0 | 1665 | return NS_OK; |
michael@0 | 1666 | } |
michael@0 | 1667 | |
michael@0 | 1668 | MOZ_ASSERT(mState > DECODER_STATE_DECODING_METADATA); |
michael@0 | 1669 | |
michael@0 | 1670 | if (mIsVideoDecoding && !mDispatchedVideoDecodeTask) { |
michael@0 | 1671 | nsresult rv = mDecodeTaskQueue->Dispatch( |
michael@0 | 1672 | NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DecodeVideo)); |
michael@0 | 1673 | if (NS_SUCCEEDED(rv)) { |
michael@0 | 1674 | mDispatchedVideoDecodeTask = true; |
michael@0 | 1675 | } else { |
michael@0 | 1676 | NS_WARNING("Failed to dispatch task to decode video"); |
michael@0 | 1677 | } |
michael@0 | 1678 | } |
michael@0 | 1679 | |
michael@0 | 1680 | return NS_OK; |
michael@0 | 1681 | } |
michael@0 | 1682 | |
nsresult
MediaDecoderStateMachine::StartAudioThread()
{
  // Start the dedicated audio playback thread, unless audio is being
  // captured (mAudioCaptured), in which case the thread must stay stopped.
  NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(),
               "Should be on state machine or decode thread.");
  AssertCurrentThreadInMonitor();
  if (mAudioCaptured) {
    NS_ASSERTION(mStopAudioThread, "mStopAudioThread must always be true if audio is captured");
    return NS_OK;
  }

  mStopAudioThread = false;
  if (HasAudio() && !mAudioThread) {
    nsresult rv = NS_NewNamedThread("Media Audio",
                                    getter_AddRefs(mAudioThread),
                                    nullptr,
                                    MEDIA_THREAD_STACK_SIZE);
    if (NS_FAILED(rv)) {
      // Without an audio thread we cannot continue; shut the decoder down.
      DECODER_LOG(PR_LOG_WARNING, "Changed state to SHUTDOWN because failed to create audio thread");
      mState = DECODER_STATE_SHUTDOWN;
      return rv;
    }

    // Run the audio loop on the new thread; see StopAudioThread() for how
    // it is later wound down.
    nsCOMPtr<nsIRunnable> event =
      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::AudioLoop);
    mAudioThread->Dispatch(event, NS_DISPATCH_NORMAL);
  }
  return NS_OK;
}
michael@0 | 1712 | |
michael@0 | 1713 | int64_t MediaDecoderStateMachine::AudioDecodedUsecs() |
michael@0 | 1714 | { |
michael@0 | 1715 | NS_ASSERTION(HasAudio(), |
michael@0 | 1716 | "Should only call AudioDecodedUsecs() when we have audio"); |
michael@0 | 1717 | // The amount of audio we have decoded is the amount of audio data we've |
michael@0 | 1718 | // already decoded and pushed to the hardware, plus the amount of audio |
michael@0 | 1719 | // data waiting to be pushed to the hardware. |
michael@0 | 1720 | int64_t pushed = (mAudioEndTime != -1) ? (mAudioEndTime - GetMediaTime()) : 0; |
michael@0 | 1721 | return pushed + AudioQueue().Duration(); |
michael@0 | 1722 | } |
michael@0 | 1723 | |
michael@0 | 1724 | bool MediaDecoderStateMachine::HasLowDecodedData(int64_t aAudioUsecs) |
michael@0 | 1725 | { |
michael@0 | 1726 | AssertCurrentThreadInMonitor(); |
michael@0 | 1727 | // We consider ourselves low on decoded data if we're low on audio, |
michael@0 | 1728 | // provided we've not decoded to the end of the audio stream, or |
michael@0 | 1729 | // if we're low on video frames, provided |
michael@0 | 1730 | // we've not decoded to the end of the video stream. |
michael@0 | 1731 | return ((HasAudio() && |
michael@0 | 1732 | !AudioQueue().IsFinished() && |
michael@0 | 1733 | AudioDecodedUsecs() < aAudioUsecs) |
michael@0 | 1734 | || |
michael@0 | 1735 | (HasVideo() && |
michael@0 | 1736 | !VideoQueue().IsFinished() && |
michael@0 | 1737 | static_cast<uint32_t>(VideoQueue().GetSize()) < LOW_VIDEO_FRAMES)); |
michael@0 | 1738 | } |
michael@0 | 1739 | |
bool MediaDecoderStateMachine::HasLowUndecodedData()
{
  // Overload using the default low-data threshold (mLowDataThresholdUsecs).
  return HasLowUndecodedData(mLowDataThresholdUsecs);
}
michael@0 | 1744 | |
michael@0 | 1745 | bool MediaDecoderStateMachine::HasLowUndecodedData(double aUsecs) |
michael@0 | 1746 | { |
michael@0 | 1747 | AssertCurrentThreadInMonitor(); |
michael@0 | 1748 | NS_ASSERTION(mState > DECODER_STATE_DECODING_METADATA, |
michael@0 | 1749 | "Must have loaded metadata for GetBuffered() to work"); |
michael@0 | 1750 | |
michael@0 | 1751 | bool reliable; |
michael@0 | 1752 | double bytesPerSecond = mDecoder->ComputePlaybackRate(&reliable); |
michael@0 | 1753 | if (!reliable) { |
michael@0 | 1754 | // Default to assuming we have enough |
michael@0 | 1755 | return false; |
michael@0 | 1756 | } |
michael@0 | 1757 | |
michael@0 | 1758 | MediaResource* stream = mDecoder->GetResource(); |
michael@0 | 1759 | int64_t currentPos = stream->Tell(); |
michael@0 | 1760 | int64_t requiredPos = currentPos + int64_t((aUsecs/1000000.0)*bytesPerSecond); |
michael@0 | 1761 | int64_t length = stream->GetLength(); |
michael@0 | 1762 | if (length >= 0) { |
michael@0 | 1763 | requiredPos = std::min(requiredPos, length); |
michael@0 | 1764 | } |
michael@0 | 1765 | |
michael@0 | 1766 | return stream->GetCachedDataEnd(currentPos) < requiredPos; |
michael@0 | 1767 | } |
michael@0 | 1768 | |
void
MediaDecoderStateMachine::DecodeError()
{
  // Abort after an unrecoverable decode failure: move to SHUTDOWN and
  // synchronously notify MediaDecoder on the main thread.
  AssertCurrentThreadInMonitor();
  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");

  // Change state to shutdown before sending error report to MediaDecoder
  // and the HTMLMediaElement, so that our pipeline can start exiting
  // cleanly during the sync dispatch below.
  DECODER_LOG(PR_LOG_WARNING, "Decode error, changed state to SHUTDOWN");
  ScheduleStateMachine();
  mState = DECODER_STATE_SHUTDOWN;
  mDecoder->GetReentrantMonitor().NotifyAll();

  // Dispatch the event to call DecodeError synchronously. This ensures
  // we're in shutdown state by the time we exit the decode thread.
  // If we just moved to shutdown state here on the decode thread, we may
  // cause the state machine to shutdown/free memory without closing its
  // media stream properly, and we'll get callbacks from the media stream
  // causing a crash.
  {
    nsCOMPtr<nsIRunnable> event =
      NS_NewRunnableMethod(mDecoder, &MediaDecoder::DecodeError);
    // Exit the monitor for the duration of the synchronous dispatch —
    // presumably the main-thread handler needs to take it; confirm against
    // MediaDecoder::DecodeError.
    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
    NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
  }
}
michael@0 | 1796 | |
michael@0 | 1797 | void |
michael@0 | 1798 | MediaDecoderStateMachine::CallDecodeMetadata() |
michael@0 | 1799 | { |
michael@0 | 1800 | ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); |
michael@0 | 1801 | if (mState != DECODER_STATE_DECODING_METADATA) { |
michael@0 | 1802 | return; |
michael@0 | 1803 | } |
michael@0 | 1804 | if (NS_FAILED(DecodeMetadata())) { |
michael@0 | 1805 | DECODER_LOG(PR_LOG_WARNING, "Decode metadata failed, shutting down decoder"); |
michael@0 | 1806 | DecodeError(); |
michael@0 | 1807 | } |
michael@0 | 1808 | } |
michael@0 | 1809 | |
// Reads the media's metadata (track info, duration, tags) on the decode
// thread, publishes it to the media element, installs the audio/video
// pop listeners that drive further decoding, and transitions from
// DECODING_METADATA to DECODING. Returns NS_ERROR_FAILURE if the state
// has moved on (e.g. shutdown) or the media has no valid tracks.
nsresult MediaDecoderStateMachine::DecodeMetadata()
{
  AssertCurrentThreadInMonitor();
  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  DECODER_LOG(PR_LOG_DEBUG, "Decoding Media Headers");
  // Bail if another state transition (seek/shutdown) raced ahead of this task.
  if (mState != DECODER_STATE_DECODING_METADATA) {
    return NS_ERROR_FAILURE;
  }
  EnsureActive();

  nsresult res;
  MediaInfo info;
  MetadataTags* tags;
  {
    // ReadMetadata can block on I/O; drop the monitor so the main thread
    // isn't stalled while we parse headers.
    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
    res = mReader->ReadMetadata(&info, &tags);
  }
  if (NS_SUCCEEDED(res) &&
      mState == DECODER_STATE_DECODING_METADATA &&
      mReader->IsWaitingMediaResources()) {
    // change state to DECODER_STATE_WAIT_FOR_RESOURCES
    StartWaitForResources();
    return NS_OK;
  }

  mInfo = info;

  if (NS_FAILED(res) || (!info.HasValidMedia())) {
    return NS_ERROR_FAILURE;
  }
  mDecoder->StartProgressUpdates();
  mGotDurationFromMetaData = (GetDuration() != -1);

  // FindStartTime decodes the first frames to establish mStartTime; if it
  // yields a video frame, show it immediately (monitor must be dropped for
  // rendering).
  VideoData* videoData = FindStartTime();
  if (videoData) {
    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
    RenderVideoFrame(videoData, TimeStamp::Now());
  }

  // Re-check state: shutdown may have happened while the monitor was released.
  if (mState == DECODER_STATE_SHUTDOWN) {
    return NS_ERROR_FAILURE;
  }

  NS_ASSERTION(mStartTime != -1, "Must have start time");
  MOZ_ASSERT((!HasVideo() && !HasAudio()) ||
               !(mMediaSeekable && mTransportSeekable) || mEndTime != -1,
             "Active seekable media should have end time");
  MOZ_ASSERT(!(mMediaSeekable && mTransportSeekable) ||
               GetDuration() != -1, "Seekable media should have duration");
  DECODER_LOG(PR_LOG_DEBUG, "Media goes from %lld to %lld (duration %lld) "
              "transportSeekable=%d, mediaSeekable=%d",
              mStartTime, mEndTime, GetDuration(), mTransportSeekable, mMediaSeekable);

  if (HasAudio() && !HasVideo()) {
    // We're playing audio only. We don't need to worry about slow video
    // decodes causing audio underruns, so don't buffer so much audio in
    // order to reduce memory usage.
    mAmpleAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
    mLowAudioThresholdUsecs /= NO_VIDEO_AMPLE_AUDIO_DIVISOR;
  }

  // Inform the element that we've loaded the metadata and the first frame.
  nsCOMPtr<nsIRunnable> metadataLoadedEvent =
    new AudioMetadataEventRunner(mDecoder,
                                 mInfo.mAudio.mChannels,
                                 mInfo.mAudio.mRate,
                                 HasAudio(),
                                 HasVideo(),
                                 tags);
  NS_DispatchToMainThread(metadataLoadedEvent, NS_DISPATCH_NORMAL);

  // Hook up pop listeners: whenever a sample is consumed from a queue, a
  // task is dispatched to (maybe) decode more data to refill it.
  if (HasAudio()) {
    RefPtr<nsIRunnable> decodeTask(
      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchAudioDecodeTaskIfNeeded));
    AudioQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
  }
  if (HasVideo()) {
    RefPtr<nsIRunnable> decodeTask(
      NS_NewRunnableMethod(this, &MediaDecoderStateMachine::DispatchVideoDecodeTaskIfNeeded));
    VideoQueue().AddPopListener(decodeTask, mDecodeTaskQueue);
  }

  if (mState == DECODER_STATE_DECODING_METADATA) {
    DECODER_LOG(PR_LOG_DEBUG, "Changed state from DECODING_METADATA to DECODING");
    StartDecoding();
  }

  // For very short media FindStartTime() can decode the entire media.
  // So we need to check if this has occurred, else our decode pipeline won't
  // run (since it doesn't need to) and we won't detect end of stream.
  CheckIfDecodeComplete();

  // If the element is already in the "playing" state, begin playback now
  // that we have frames to show.
  if ((mState == DECODER_STATE_DECODING || mState == DECODER_STATE_COMPLETED) &&
      mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
      !IsPlaying())
  {
    StartPlayback();
  }

  return NS_OK;
}
michael@0 | 1911 | |
// Performs the seek to mSeekTarget on the decode thread: stops playback,
// fires SeekingStarted (sync, on the main thread), asks the reader to seek
// (optionally decoding up to the exact target), renders the first frame at
// the new position, then fires SeekingStopped/SeekingStoppedAtEnd and
// resumes decoding. No-op unless mState == DECODER_STATE_SEEKING.
void MediaDecoderStateMachine::DecodeSeek()
{
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  NS_ASSERTION(OnDecodeThread(), "Should be on decode thread.");
  if (mState != DECODER_STATE_SEEKING) {
    return;
  }
  EnsureActive();

  // During the seek, don't have a lock on the decoder state,
  // otherwise long seek operations can block the main thread.
  // The events dispatched to the main thread are SYNC calls.
  // These calls are made outside of the decode monitor lock so
  // it is safe for the main thread to makes calls that acquire
  // the lock since it won't deadlock. We check the state when
  // acquiring the lock again in case shutdown has occurred
  // during the time when we didn't have the lock.
  int64_t seekTime = mSeekTarget.mTime;
  mDecoder->StopProgressUpdates();

  bool currentTimeChanged = false;
  const int64_t mediaTime = GetMediaTime();
  if (mediaTime != seekTime) {
    currentTimeChanged = true;
    // Stop playback now to ensure that while we're outside the monitor
    // dispatching SeekingStarted, playback doesn't advance and mess with
    // mCurrentFrameTime that we've setting to seekTime here.
    StopPlayback();
    UpdatePlaybackPositionInternal(seekTime);
  }

  // SeekingStarted will do a UpdateReadyStateForData which will
  // inform the element and its users that we have no frames
  // to display
  {
    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
    nsCOMPtr<nsIRunnable> startEvent =
      NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStarted);
    NS_DispatchToMainThread(startEvent, NS_DISPATCH_SYNC);
  }

  int64_t newCurrentTime = seekTime;
  if (currentTimeChanged) {
    // The seek target is different than the current playback position,
    // we'll need to seek the playback position, so shutdown our decode
    // and audio threads.
    StopAudioThread();
    ResetPlayback();
    nsresult res;
    {
      ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
      // Now perform the seek. We must not hold the state machine monitor
      // while we seek, since the seek reads, which could block on I/O.
      res = mReader->Seek(seekTime,
                          mStartTime,
                          mEndTime,
                          mediaTime);

      // An "Accurate" seek additionally decodes (and discards) samples up
      // to the exact target; otherwise we stop at the preceding keyframe.
      if (NS_SUCCEEDED(res) && mSeekTarget.mType == SeekTarget::Accurate) {
        res = mReader->DecodeToTarget(seekTime);
      }
    }

    if (NS_SUCCEEDED(res)) {
      int64_t nextSampleStartTime = 0;
      VideoData* video = nullptr;
      {
        ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
        video = mReader->FindStartTime(nextSampleStartTime);
      }

      // Setup timestamp state. Prefer the first decoded audio sample's time,
      // falling back to the first video frame's time, then the raw target.
      if (seekTime == mEndTime) {
        newCurrentTime = mAudioStartTime = seekTime;
      } else if (HasAudio()) {
        AudioData* audio = AudioQueue().PeekFront();
        newCurrentTime = mAudioStartTime = audio ? audio->mTime : seekTime;
      } else {
        newCurrentTime = video ? video->mTime : seekTime;
      }
      mPlayDuration = newCurrentTime - mStartTime;

      if (HasVideo()) {
        if (video) {
          // Show the frame at the seek point and request a repaint.
          {
            ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
            RenderVideoFrame(video, TimeStamp::Now());
          }
          nsCOMPtr<nsIRunnable> event =
            NS_NewRunnableMethod(mDecoder, &MediaDecoder::Invalidate);
          NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL);
        }
      }
    } else {
      DecodeError();
    }
  }
  mDecoder->StartProgressUpdates();
  // The state may have changed while the monitor was released above; don't
  // clobber a metadata/dormant/shutdown transition.
  if (mState == DECODER_STATE_DECODING_METADATA ||
      mState == DECODER_STATE_DORMANT ||
      mState == DECODER_STATE_SHUTDOWN) {
    return;
  }

  // Change state to DECODING or COMPLETED now. SeekingStopped will
  // call MediaDecoderStateMachine::Seek to reset our state to SEEKING
  // if we need to seek again.

  nsCOMPtr<nsIRunnable> stopEvent;
  bool isLiveStream = mDecoder->GetResource()->GetLength() == -1;
  if (GetMediaTime() == mEndTime && !isLiveStream) {
    // Seeked to end of media, move to COMPLETED state. Note we don't do
    // this if we're playing a live stream, since the end of media will advance
    // once we download more data!
    DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to COMPLETED", seekTime);
    stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStoppedAtEnd);
    // Explicitly set our state so we don't decode further, and so
    // we report playback ended to the media element.
    mState = DECODER_STATE_COMPLETED;
    mIsAudioDecoding = false;
    mIsVideoDecoding = false;
    DispatchDecodeTasksIfNeeded();
  } else {
    DECODER_LOG(PR_LOG_DEBUG, "Changed state from SEEKING (to %lld) to DECODING", seekTime);
    stopEvent = NS_NewRunnableMethod(mDecoder, &MediaDecoder::SeekingStopped);
    StartDecoding();
  }

  if (newCurrentTime != mediaTime) {
    UpdatePlaybackPositionInternal(newCurrentTime);
    if (mDecoder->GetDecodedStream()) {
      SetSyncPointForMediaStream();
    }
  }

  // Try to decode another frame to detect if we're at the end...
  DECODER_LOG(PR_LOG_DEBUG, "Seek completed, mCurrentFrameTime=%lld", mCurrentFrameTime);

  {
    ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
    NS_DispatchToMainThread(stopEvent, NS_DISPATCH_SYNC);
  }

  // Reset quick buffering status. This ensures that if we began the
  // seek while quick-buffering, we won't bypass quick buffering mode
  // if we need to buffer after the seek.
  mQuickBuffering = false;

  ScheduleStateMachine();
}
michael@0 | 2062 | |
michael@0 | 2063 | // Runnable to dispose of the decoder and state machine on the main thread. |
michael@0 | 2064 | class nsDecoderDisposeEvent : public nsRunnable { |
michael@0 | 2065 | public: |
michael@0 | 2066 | nsDecoderDisposeEvent(already_AddRefed<MediaDecoder> aDecoder, |
michael@0 | 2067 | already_AddRefed<MediaDecoderStateMachine> aStateMachine) |
michael@0 | 2068 | : mDecoder(aDecoder), mStateMachine(aStateMachine) {} |
michael@0 | 2069 | NS_IMETHOD Run() { |
michael@0 | 2070 | NS_ASSERTION(NS_IsMainThread(), "Must be on main thread."); |
michael@0 | 2071 | mStateMachine->ReleaseDecoder(); |
michael@0 | 2072 | mDecoder->ReleaseStateMachine(); |
michael@0 | 2073 | mStateMachine = nullptr; |
michael@0 | 2074 | mDecoder = nullptr; |
michael@0 | 2075 | return NS_OK; |
michael@0 | 2076 | } |
michael@0 | 2077 | private: |
michael@0 | 2078 | nsRefPtr<MediaDecoder> mDecoder; |
michael@0 | 2079 | nsRefPtr<MediaDecoderStateMachine> mStateMachine; |
michael@0 | 2080 | }; |
michael@0 | 2081 | |
michael@0 | 2082 | // Runnable which dispatches an event to the main thread to dispose of the |
michael@0 | 2083 | // decoder and state machine. This runs on the state machine thread after |
michael@0 | 2084 | // the state machine has shutdown, and all events for that state machine have |
michael@0 | 2085 | // finished running. |
michael@0 | 2086 | class nsDispatchDisposeEvent : public nsRunnable { |
michael@0 | 2087 | public: |
michael@0 | 2088 | nsDispatchDisposeEvent(MediaDecoder* aDecoder, |
michael@0 | 2089 | MediaDecoderStateMachine* aStateMachine) |
michael@0 | 2090 | : mDecoder(aDecoder), mStateMachine(aStateMachine) {} |
michael@0 | 2091 | NS_IMETHOD Run() { |
michael@0 | 2092 | NS_DispatchToMainThread(new nsDecoderDisposeEvent(mDecoder.forget(), |
michael@0 | 2093 | mStateMachine.forget())); |
michael@0 | 2094 | return NS_OK; |
michael@0 | 2095 | } |
michael@0 | 2096 | private: |
michael@0 | 2097 | nsRefPtr<MediaDecoder> mDecoder; |
michael@0 | 2098 | nsRefPtr<MediaDecoderStateMachine> mStateMachine; |
michael@0 | 2099 | }; |
michael@0 | 2100 | |
// One iteration of the state machine loop: inspects mState and performs
// that state's work (possibly scheduling another run). Runs on the state
// machine thread with the decoder monitor held.
nsresult MediaDecoderStateMachine::RunStateMachine()
{
  AssertCurrentThreadInMonitor();

  MediaResource* resource = mDecoder->GetResource();
  NS_ENSURE_TRUE(resource, NS_ERROR_NULL_POINTER);

  switch (mState) {
    case DECODER_STATE_SHUTDOWN: {
      // Stop playback and all worker threads, then daisy-chain events to
      // destroy the decoder/state machine on the main thread.
      if (IsPlaying()) {
        StopPlayback();
      }
      StopAudioThread();
      // If mAudioThread is non-null after StopAudioThread completes, we are
      // running in a nested event loop waiting for Shutdown() on
      // mAudioThread to complete. Return to the event loop and let it
      // finish processing before continuing with shutdown.
      if (mAudioThread) {
        MOZ_ASSERT(mStopAudioThread);
        return NS_OK;
      }

      // The reader's listeners hold references to the state machine,
      // creating a cycle which keeps the state machine and its shared
      // thread pools alive. So break it here.
      AudioQueue().ClearListeners();
      VideoQueue().ClearListeners();

      {
        ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
        // Wait for the thread decoding to exit.
        mDecodeTaskQueue->Shutdown();
        mDecodeTaskQueue = nullptr;
        mReader->ReleaseMediaResources();
      }
      // Now that those threads are stopped, there's no possibility of
      // mPendingWakeDecoder being needed again. Revoke it.
      mPendingWakeDecoder = nullptr;

      MOZ_ASSERT(mState == DECODER_STATE_SHUTDOWN,
                 "How did we escape from the shutdown state?");
      // We must daisy-chain these events to destroy the decoder. We must
      // destroy the decoder on the main thread, but we can't destroy the
      // decoder while this thread holds the decoder monitor. We can't
      // dispatch an event to the main thread to destroy the decoder from
      // here, as the event may run before the dispatch returns, and we
      // hold the decoder monitor here. We also want to guarantee that the
      // state machine is destroyed on the main thread, and so the
      // event runner running this function (which holds a reference to the
      // state machine) needs to finish and be released in order to allow
      // that. So we dispatch an event to run after this event runner has
      // finished and released its monitor/references. That event then will
      // dispatch an event to the main thread to release the decoder and
      // state machine.
      GetStateMachineThread()->Dispatch(
        new nsDispatchDisposeEvent(mDecoder, this), NS_DISPATCH_NORMAL);

      mTimer->Cancel();
      mTimer = nullptr;
      return NS_OK;
    }

    case DECODER_STATE_DORMANT: {
      // Like shutdown, but recoverable: stop playing and release decoder
      // resources while keeping the state machine alive.
      if (IsPlaying()) {
        StopPlayback();
      }
      StopAudioThread();
      // Now that those threads are stopped, there's no possibility of
      // mPendingWakeDecoder being needed again. Revoke it.
      mPendingWakeDecoder = nullptr;
      {
        ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
        // Wait for the thread decoding, if any, to exit.
        mDecodeTaskQueue->AwaitIdle();
        mReader->ReleaseMediaResources();
      }
      return NS_OK;
    }

    case DECODER_STATE_WAIT_FOR_RESOURCES: {
      // Nothing to do; we're woken again when resources become available.
      return NS_OK;
    }

    case DECODER_STATE_DECODING_METADATA: {
      // Ensure we have a decode thread to decode metadata.
      return EnqueueDecodeMetadataTask();
    }

    case DECODER_STATE_DECODING: {
      // Keep IsPlaying() in sync with the element's play/pause state.
      if (mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING &&
          IsPlaying())
      {
        // We're playing, but the element/decoder is in paused state. Stop
        // playing!
        StopPlayback();
      }

      if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
          !IsPlaying()) {
        // We are playing, but the state machine does not know it yet. Tell it
        // that it is, so that the clock can be properly queried.
        StartPlayback();
      }

      AdvanceFrame();
      NS_ASSERTION(mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING ||
                   IsStateMachineScheduled() ||
                   mPlaybackRate == 0.0, "Must have timer scheduled");
      return NS_OK;
    }

    case DECODER_STATE_BUFFERING: {
      TimeStamp now = TimeStamp::Now();
      NS_ASSERTION(!mBufferingStart.IsNull(), "Must know buffering start time.");

      // We will remain in the buffering state if we've not decoded enough
      // data to begin playback, or if we've not downloaded a reasonable
      // amount of data inside our buffering time.
      TimeDuration elapsed = now - mBufferingStart;
      bool isLiveStream = resource->GetLength() == -1;
      if ((isLiveStream || !mDecoder->CanPlayThrough()) &&
            elapsed < TimeDuration::FromSeconds(mBufferingWait * mPlaybackRate) &&
            (mQuickBuffering ? HasLowDecodedData(QUICK_BUFFERING_LOW_DATA_USECS)
                            : HasLowUndecodedData(mBufferingWait * USECS_PER_S)) &&
            !mDecoder->IsDataCachedToEndOfResource() &&
            !resource->IsSuspended())
      {
        DECODER_LOG(PR_LOG_DEBUG, "Buffering: wait %ds, timeout in %.3lfs %s",
                    mBufferingWait, mBufferingWait - elapsed.ToSeconds(),
                    (mQuickBuffering ? "(quick exit)" : ""));
        // Still buffering; poll again in one second.
        ScheduleStateMachine(USECS_PER_S);
        return NS_OK;
      } else {
        DECODER_LOG(PR_LOG_DEBUG, "Changed state from BUFFERING to DECODING");
        DECODER_LOG(PR_LOG_DEBUG, "Buffered for %.3lfs", (now - mBufferingStart).ToSeconds());
        StartDecoding();
      }

      // Notify to allow blocked decoder thread to continue
      mDecoder->GetReentrantMonitor().NotifyAll();
      UpdateReadyState();
      if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
          !IsPlaying())
      {
        StartPlayback();
      }
      NS_ASSERTION(IsStateMachineScheduled(), "Must have timer scheduled");
      return NS_OK;
    }

    case DECODER_STATE_SEEKING: {
      // Ensure we have a decode thread to perform the seek.
      return EnqueueDecodeSeekTask();
    }

    case DECODER_STATE_COMPLETED: {
      // Play the remaining media. We want to run AdvanceFrame() at least
      // once to ensure the current playback position is advanced to the
      // end of the media, and so that we update the readyState.
      if (VideoQueue().GetSize() > 0 ||
          (HasAudio() && !mAudioCompleted) ||
          (mDecoder->GetDecodedStream() && !mDecoder->GetDecodedStream()->IsFinished()))
      {
        AdvanceFrame();
        NS_ASSERTION(mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING ||
                     mPlaybackRate == 0 ||
                     IsStateMachineScheduled(),
                     "Must have timer scheduled");
        return NS_OK;
      }

      // StopPlayback in order to reset the IsPlaying() state so audio
      // is restarted correctly.
      StopPlayback();

      if (mState != DECODER_STATE_COMPLETED) {
        // While we're presenting a frame we can change state. Whatever changed
        // our state should have scheduled another state machine run.
        NS_ASSERTION(IsStateMachineScheduled(), "Must have timer scheduled");
        return NS_OK;
      }

      StopAudioThread();
      // When we're decoding to a stream, the stream's main-thread finish signal
      // will take care of calling MediaDecoder::PlaybackEnded.
      if (mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING &&
          !mDecoder->GetDecodedStream()) {
        // Clamp the playback position to the very end of the media.
        int64_t videoTime = HasVideo() ? mVideoFrameEndTime : 0;
        int64_t clockTime = std::max(mEndTime, std::max(videoTime, GetAudioClock()));
        UpdatePlaybackPosition(clockTime);

        {
          // Wait for the state change is completed in the main thread,
          // otherwise we might see |mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING|
          // in next loop and send |MediaDecoder::PlaybackEnded| again to trigger 'ended'
          // event twice in the media element.
          ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor());
          nsCOMPtr<nsIRunnable> event =
            NS_NewRunnableMethod(mDecoder, &MediaDecoder::PlaybackEnded);
          NS_DispatchToMainThread(event, NS_DISPATCH_SYNC);
        }
      }
      return NS_OK;
    }
  }

  return NS_OK;
}
michael@0 | 2309 | |
michael@0 | 2310 | void MediaDecoderStateMachine::RenderVideoFrame(VideoData* aData, |
michael@0 | 2311 | TimeStamp aTarget) |
michael@0 | 2312 | { |
michael@0 | 2313 | NS_ASSERTION(OnStateMachineThread() || OnDecodeThread(), |
michael@0 | 2314 | "Should be on state machine or decode thread."); |
michael@0 | 2315 | mDecoder->GetReentrantMonitor().AssertNotCurrentThreadIn(); |
michael@0 | 2316 | |
michael@0 | 2317 | if (aData->mDuplicate) { |
michael@0 | 2318 | return; |
michael@0 | 2319 | } |
michael@0 | 2320 | |
michael@0 | 2321 | VERBOSE_LOG("playing video frame %lld", aData->mTime); |
michael@0 | 2322 | |
michael@0 | 2323 | VideoFrameContainer* container = mDecoder->GetVideoFrameContainer(); |
michael@0 | 2324 | if (container) { |
michael@0 | 2325 | container->SetCurrentFrame(ThebesIntSize(aData->mDisplay), aData->mImage, |
michael@0 | 2326 | aTarget); |
michael@0 | 2327 | } |
michael@0 | 2328 | } |
michael@0 | 2329 | |
michael@0 | 2330 | int64_t |
michael@0 | 2331 | MediaDecoderStateMachine::GetAudioClock() |
michael@0 | 2332 | { |
michael@0 | 2333 | // We must hold the decoder monitor while using the audio stream off the |
michael@0 | 2334 | // audio thread to ensure that it doesn't get destroyed on the audio thread |
michael@0 | 2335 | // while we're using it. |
michael@0 | 2336 | AssertCurrentThreadInMonitor(); |
michael@0 | 2337 | if (!HasAudio() || mAudioCaptured) |
michael@0 | 2338 | return -1; |
michael@0 | 2339 | if (!mAudioStream) { |
michael@0 | 2340 | // Audio thread hasn't played any data yet. |
michael@0 | 2341 | return mAudioStartTime; |
michael@0 | 2342 | } |
michael@0 | 2343 | int64_t t = mAudioStream->GetPosition(); |
michael@0 | 2344 | return (t == -1) ? -1 : t + mAudioStartTime; |
michael@0 | 2345 | } |
michael@0 | 2346 | |
michael@0 | 2347 | int64_t MediaDecoderStateMachine::GetVideoStreamPosition() |
michael@0 | 2348 | { |
michael@0 | 2349 | AssertCurrentThreadInMonitor(); |
michael@0 | 2350 | |
michael@0 | 2351 | if (!IsPlaying()) { |
michael@0 | 2352 | return mPlayDuration + mStartTime; |
michael@0 | 2353 | } |
michael@0 | 2354 | |
michael@0 | 2355 | // The playbackRate has been just been changed, reset the playstartTime. |
michael@0 | 2356 | if (mResetPlayStartTime) { |
michael@0 | 2357 | mPlayStartTime = TimeStamp::Now(); |
michael@0 | 2358 | mResetPlayStartTime = false; |
michael@0 | 2359 | } |
michael@0 | 2360 | |
michael@0 | 2361 | int64_t pos = DurationToUsecs(TimeStamp::Now() - mPlayStartTime) + mPlayDuration; |
michael@0 | 2362 | pos -= mBasePosition; |
michael@0 | 2363 | NS_ASSERTION(pos >= 0, "Video stream position should be positive."); |
michael@0 | 2364 | return mBasePosition + pos * mPlaybackRate + mStartTime; |
michael@0 | 2365 | } |
michael@0 | 2366 | |
// Returns the current playback position in microseconds, choosing the most
// authoritative clock available. NOTE: as a side effect, while the audio
// clock is authoritative this resyncs mPlayDuration/mPlayStartTime so the
// system clock stays in agreement with it.
int64_t MediaDecoderStateMachine::GetClock()
{
  AssertCurrentThreadInMonitor();

  // Determine the clock time. If we've got audio, and we've not reached
  // the end of the audio, use the audio clock. However if we've finished
  // audio, or don't have audio, use the system clock. If our output is being
  // fed to a MediaStream, use that stream as the source of the clock.
  int64_t clock_time = -1;
  DecodedStreamData* stream = mDecoder->GetDecodedStream();
  if (!IsPlaying()) {
    // Paused: position is frozen at the accumulated play duration.
    clock_time = mPlayDuration + mStartTime;
  } else if (stream) {
    clock_time = GetCurrentTimeViaMediaStreamSync();
  } else {
    int64_t audio_time = GetAudioClock();
    if (HasAudio() && !mAudioCompleted && audio_time != -1) {
      clock_time = audio_time;
      // Resync against the audio clock, while we're trusting the
      // audio clock. This ensures no "drift", particularly on Linux.
      mPlayDuration = clock_time - mStartTime;
      mPlayStartTime = TimeStamp::Now();
    } else {
      // Audio is disabled on this system. Sync to the system clock.
      clock_time = GetVideoStreamPosition();
      // Ensure the clock can never go backwards.
      NS_ASSERTION(mCurrentFrameTime <= clock_time || mPlaybackRate <= 0,
                   "Clock should go forwards if the playback rate is > 0.");
    }
  }
  return clock_time;
}
michael@0 | 2399 | |
michael@0 | 2400 | void MediaDecoderStateMachine::AdvanceFrame() |
michael@0 | 2401 | { |
michael@0 | 2402 | NS_ASSERTION(OnStateMachineThread(), "Should be on state machine thread."); |
michael@0 | 2403 | AssertCurrentThreadInMonitor(); |
michael@0 | 2404 | NS_ASSERTION(!HasAudio() || mAudioStartTime != -1, |
michael@0 | 2405 | "Should know audio start time if we have audio."); |
michael@0 | 2406 | |
michael@0 | 2407 | if (mDecoder->GetState() != MediaDecoder::PLAY_STATE_PLAYING) { |
michael@0 | 2408 | return; |
michael@0 | 2409 | } |
michael@0 | 2410 | |
michael@0 | 2411 | // If playbackRate is 0.0, we should stop the progress, but not be in paused |
michael@0 | 2412 | // state, per spec. |
michael@0 | 2413 | if (mPlaybackRate == 0.0) { |
michael@0 | 2414 | return; |
michael@0 | 2415 | } |
michael@0 | 2416 | |
michael@0 | 2417 | int64_t clock_time = GetClock(); |
michael@0 | 2418 | // Skip frames up to the frame at the playback position, and figure out |
michael@0 | 2419 | // the time remaining until it's time to display the next frame. |
michael@0 | 2420 | int64_t remainingTime = AUDIO_DURATION_USECS; |
michael@0 | 2421 | NS_ASSERTION(clock_time >= mStartTime, "Should have positive clock time."); |
michael@0 | 2422 | nsAutoPtr<VideoData> currentFrame; |
michael@0 | 2423 | #ifdef PR_LOGGING |
michael@0 | 2424 | int32_t droppedFrames = 0; |
michael@0 | 2425 | #endif |
michael@0 | 2426 | if (VideoQueue().GetSize() > 0) { |
michael@0 | 2427 | VideoData* frame = VideoQueue().PeekFront(); |
michael@0 | 2428 | while (mRealTime || clock_time >= frame->mTime) { |
michael@0 | 2429 | mVideoFrameEndTime = frame->GetEndTime(); |
michael@0 | 2430 | currentFrame = frame; |
michael@0 | 2431 | #ifdef PR_LOGGING |
michael@0 | 2432 | VERBOSE_LOG("discarding video frame %lld", frame->mTime); |
michael@0 | 2433 | if (droppedFrames++) { |
michael@0 | 2434 | VERBOSE_LOG("discarding video frame %lld (%d so far)", frame->mTime, droppedFrames-1); |
michael@0 | 2435 | } |
michael@0 | 2436 | #endif |
michael@0 | 2437 | VideoQueue().PopFront(); |
michael@0 | 2438 | // Notify the decode thread that the video queue's buffers may have |
michael@0 | 2439 | // free'd up space for more frames. |
michael@0 | 2440 | mDecoder->GetReentrantMonitor().NotifyAll(); |
michael@0 | 2441 | mDecoder->UpdatePlaybackOffset(frame->mOffset); |
michael@0 | 2442 | if (VideoQueue().GetSize() == 0) |
michael@0 | 2443 | break; |
michael@0 | 2444 | frame = VideoQueue().PeekFront(); |
michael@0 | 2445 | } |
michael@0 | 2446 | // Current frame has already been presented, wait until it's time to |
michael@0 | 2447 | // present the next frame. |
michael@0 | 2448 | if (frame && !currentFrame) { |
michael@0 | 2449 | int64_t now = IsPlaying() ? clock_time : mPlayDuration; |
michael@0 | 2450 | |
michael@0 | 2451 | remainingTime = frame->mTime - now; |
michael@0 | 2452 | } |
michael@0 | 2453 | } |
michael@0 | 2454 | |
michael@0 | 2455 | // Check to see if we don't have enough data to play up to the next frame. |
michael@0 | 2456 | // If we don't, switch to buffering mode. |
michael@0 | 2457 | MediaResource* resource = mDecoder->GetResource(); |
michael@0 | 2458 | if (mState == DECODER_STATE_DECODING && |
michael@0 | 2459 | mDecoder->GetState() == MediaDecoder::PLAY_STATE_PLAYING && |
michael@0 | 2460 | HasLowDecodedData(remainingTime + EXHAUSTED_DATA_MARGIN_USECS) && |
michael@0 | 2461 | !mDecoder->IsDataCachedToEndOfResource() && |
michael@0 | 2462 | !resource->IsSuspended()) { |
michael@0 | 2463 | if (JustExitedQuickBuffering() || HasLowUndecodedData()) { |
michael@0 | 2464 | if (currentFrame) { |
michael@0 | 2465 | VideoQueue().PushFront(currentFrame.forget()); |
michael@0 | 2466 | } |
michael@0 | 2467 | StartBuffering(); |
michael@0 | 2468 | // Don't go straight back to the state machine loop since that might |
michael@0 | 2469 | // cause us to start decoding again and we could flip-flop between |
michael@0 | 2470 | // decoding and quick-buffering. |
michael@0 | 2471 | ScheduleStateMachine(USECS_PER_S); |
michael@0 | 2472 | return; |
michael@0 | 2473 | } |
michael@0 | 2474 | } |
michael@0 | 2475 | |
michael@0 | 2476 | // We've got enough data to keep playing until at least the next frame. |
michael@0 | 2477 | // Start playing now if need be. |
michael@0 | 2478 | if (!IsPlaying() && ((mFragmentEndTime >= 0 && clock_time < mFragmentEndTime) || mFragmentEndTime < 0)) { |
michael@0 | 2479 | StartPlayback(); |
michael@0 | 2480 | } |
michael@0 | 2481 | |
michael@0 | 2482 | if (currentFrame) { |
michael@0 | 2483 | // Decode one frame and display it. |
michael@0 | 2484 | TimeStamp presTime = mPlayStartTime - UsecsToDuration(mPlayDuration) + |
michael@0 | 2485 | UsecsToDuration(currentFrame->mTime - mStartTime); |
michael@0 | 2486 | NS_ASSERTION(currentFrame->mTime >= mStartTime, "Should have positive frame time"); |
michael@0 | 2487 | { |
michael@0 | 2488 | ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); |
michael@0 | 2489 | // If we have video, we want to increment the clock in steps of the frame |
michael@0 | 2490 | // duration. |
michael@0 | 2491 | RenderVideoFrame(currentFrame, presTime); |
michael@0 | 2492 | } |
michael@0 | 2493 | // If we're no longer playing after dropping and reacquiring the lock, |
michael@0 | 2494 | // playback must've been stopped on the decode thread (by a seek, for |
michael@0 | 2495 | // example). In that case, the current frame is probably out of date. |
michael@0 | 2496 | if (!IsPlaying()) { |
michael@0 | 2497 | ScheduleStateMachine(); |
michael@0 | 2498 | return; |
michael@0 | 2499 | } |
michael@0 | 2500 | MediaDecoder::FrameStatistics& frameStats = mDecoder->GetFrameStatistics(); |
michael@0 | 2501 | frameStats.NotifyPresentedFrame(); |
michael@0 | 2502 | remainingTime = currentFrame->GetEndTime() - clock_time; |
michael@0 | 2503 | currentFrame = nullptr; |
michael@0 | 2504 | } |
michael@0 | 2505 | |
michael@0 | 2506 | // Cap the current time to the larger of the audio and video end time. |
michael@0 | 2507 | // This ensures that if we're running off the system clock, we don't |
michael@0 | 2508 | // advance the clock to after the media end time. |
michael@0 | 2509 | if (mVideoFrameEndTime != -1 || mAudioEndTime != -1) { |
michael@0 | 2510 | // These will be non -1 if we've displayed a video frame, or played an audio frame. |
michael@0 | 2511 | clock_time = std::min(clock_time, std::max(mVideoFrameEndTime, mAudioEndTime)); |
michael@0 | 2512 | if (clock_time > GetMediaTime()) { |
michael@0 | 2513 | // Only update the playback position if the clock time is greater |
michael@0 | 2514 | // than the previous playback position. The audio clock can |
michael@0 | 2515 | // sometimes report a time less than its previously reported in |
michael@0 | 2516 | // some situations, and we need to gracefully handle that. |
michael@0 | 2517 | UpdatePlaybackPosition(clock_time); |
michael@0 | 2518 | } |
michael@0 | 2519 | } |
michael@0 | 2520 | |
michael@0 | 2521 | // If the number of audio/video frames queued has changed, either by |
michael@0 | 2522 | // this function popping and playing a video frame, or by the audio |
michael@0 | 2523 | // thread popping and playing an audio frame, we may need to update our |
michael@0 | 2524 | // ready state. Post an update to do so. |
michael@0 | 2525 | UpdateReadyState(); |
michael@0 | 2526 | |
michael@0 | 2527 | ScheduleStateMachine(remainingTime); |
michael@0 | 2528 | } |
michael@0 | 2529 | |
// Sleeps the audio thread for up to aUsecs (clamped to at least one
// millisecond), waking early if playback stops, a seek starts, the
// audio thread is told to stop, or the state machine shuts down.
// Must be called on the audio thread with the decoder monitor held;
// the monitor is released while blocked in Wait().
void MediaDecoderStateMachine::Wait(int64_t aUsecs) {
  NS_ASSERTION(OnAudioThread(), "Only call on the audio thread");
  AssertCurrentThreadInMonitor();
  // Clamp to >= 1ms so tiny requests don't degenerate into a busy spin.
  TimeStamp end = TimeStamp::Now() + UsecsToDuration(std::max<int64_t>(USECS_PER_MS, aUsecs));
  TimeStamp now;
  while ((now = TimeStamp::Now()) < end &&
         mState != DECODER_STATE_SHUTDOWN &&
         mState != DECODER_STATE_SEEKING &&
         !mStopAudioThread &&
         IsPlaying())
  {
    // Round the residual wait to whole milliseconds; stop when it rounds
    // to zero (nothing meaningful left to wait) or would not fit the
    // 32-bit interval argument below.
    int64_t ms = static_cast<int64_t>(NS_round((end - now).ToSeconds() * 1000));
    if (ms == 0 || ms > UINT32_MAX) {
      break;
    }
    // ReentrantMonitor::Wait releases the monitor while blocked, so other
    // threads can change state and notify us awake before the deadline.
    mDecoder->GetReentrantMonitor().Wait(PR_MillisecondsToInterval(static_cast<uint32_t>(ms)));
  }
}
michael@0 | 2548 | |
michael@0 | 2549 | VideoData* MediaDecoderStateMachine::FindStartTime() |
michael@0 | 2550 | { |
michael@0 | 2551 | NS_ASSERTION(OnDecodeThread(), "Should be on decode thread."); |
michael@0 | 2552 | AssertCurrentThreadInMonitor(); |
michael@0 | 2553 | int64_t startTime = 0; |
michael@0 | 2554 | mStartTime = 0; |
michael@0 | 2555 | VideoData* v = nullptr; |
michael@0 | 2556 | { |
michael@0 | 2557 | ReentrantMonitorAutoExit exitMon(mDecoder->GetReentrantMonitor()); |
michael@0 | 2558 | v = mReader->FindStartTime(startTime); |
michael@0 | 2559 | } |
michael@0 | 2560 | if (startTime != 0) { |
michael@0 | 2561 | mStartTime = startTime; |
michael@0 | 2562 | if (mGotDurationFromMetaData) { |
michael@0 | 2563 | NS_ASSERTION(mEndTime != -1, |
michael@0 | 2564 | "We should have mEndTime as supplied duration here"); |
michael@0 | 2565 | // We were specified a duration from a Content-Duration HTTP header. |
michael@0 | 2566 | // Adjust mEndTime so that mEndTime-mStartTime matches the specified |
michael@0 | 2567 | // duration. |
michael@0 | 2568 | mEndTime = mStartTime + mEndTime; |
michael@0 | 2569 | } |
michael@0 | 2570 | } |
michael@0 | 2571 | // Set the audio start time to be start of media. If this lies before the |
michael@0 | 2572 | // first actual audio frame we have, we'll inject silence during playback |
michael@0 | 2573 | // to ensure the audio starts at the correct time. |
michael@0 | 2574 | mAudioStartTime = mStartTime; |
michael@0 | 2575 | DECODER_LOG(PR_LOG_DEBUG, "Media start time is %lld", mStartTime); |
michael@0 | 2576 | return v; |
michael@0 | 2577 | } |
michael@0 | 2578 | |
michael@0 | 2579 | void MediaDecoderStateMachine::UpdateReadyState() { |
michael@0 | 2580 | AssertCurrentThreadInMonitor(); |
michael@0 | 2581 | |
michael@0 | 2582 | MediaDecoderOwner::NextFrameStatus nextFrameStatus = GetNextFrameStatus(); |
michael@0 | 2583 | if (nextFrameStatus == mLastFrameStatus) { |
michael@0 | 2584 | return; |
michael@0 | 2585 | } |
michael@0 | 2586 | mLastFrameStatus = nextFrameStatus; |
michael@0 | 2587 | |
michael@0 | 2588 | /* This is a bit tricky. MediaDecoder::UpdateReadyStateForData will run on |
michael@0 | 2589 | * the main thread and re-evaluate GetNextFrameStatus there, passing it to |
michael@0 | 2590 | * HTMLMediaElement::UpdateReadyStateForData. It doesn't use the value of |
michael@0 | 2591 | * GetNextFrameStatus we computed here, because what we're computing here |
michael@0 | 2592 | * could be stale by the time MediaDecoder::UpdateReadyStateForData runs. |
michael@0 | 2593 | * We only compute GetNextFrameStatus here to avoid posting runnables to the main |
michael@0 | 2594 | * thread unnecessarily. |
michael@0 | 2595 | */ |
michael@0 | 2596 | nsCOMPtr<nsIRunnable> event; |
michael@0 | 2597 | event = NS_NewRunnableMethod(mDecoder, &MediaDecoder::UpdateReadyStateForData); |
michael@0 | 2598 | NS_DispatchToMainThread(event, NS_DISPATCH_NORMAL); |
michael@0 | 2599 | } |
michael@0 | 2600 | |
michael@0 | 2601 | bool MediaDecoderStateMachine::JustExitedQuickBuffering() |
michael@0 | 2602 | { |
michael@0 | 2603 | return !mDecodeStartTime.IsNull() && |
michael@0 | 2604 | mQuickBuffering && |
michael@0 | 2605 | (TimeStamp::Now() - mDecodeStartTime) < TimeDuration::FromMicroseconds(QUICK_BUFFER_THRESHOLD_USECS); |
michael@0 | 2606 | } |
michael@0 | 2607 | |
// Transitions the state machine from DECODING to BUFFERING: stops
// playback, decides whether to use "quick buffering" mode, and asks the
// element to re-evaluate its ready state. No-op in any other state.
// Caller must hold the decoder monitor.
void MediaDecoderStateMachine::StartBuffering()
{
  AssertCurrentThreadInMonitor();

  if (mState != DECODER_STATE_DECODING) {
    // We only move into BUFFERING state if we're actually decoding.
    // If we're currently doing something else, we don't need to buffer,
    // and more importantly, we shouldn't overwrite mState to interrupt
    // the current operation, as that could leave us in an inconsistent
    // state!
    return;
  }

  if (IsPlaying()) {
    StopPlayback();
  }

  TimeDuration decodeDuration = TimeStamp::Now() - mDecodeStartTime;
  // Go into quick buffering mode provided we've not just left buffering using
  // a "quick exit". This stops us flip-flopping between playing and buffering
  // when the download speed is similar to the decode speed.
  mQuickBuffering =
    !JustExitedQuickBuffering() &&
    decodeDuration < UsecsToDuration(QUICK_BUFFER_THRESHOLD_USECS);
  mBufferingStart = TimeStamp::Now();

  // We need to tell the element that buffering has started.
  // We can't just directly send an asynchronous runnable that
  // eventually fires the "waiting" event. The problem is that
  // there might be pending main-thread events, such as "data
  // received" notifications, that mean we're not actually still
  // buffering by the time this runnable executes. So instead
  // we just trigger UpdateReadyStateForData; when it runs, it
  // will check the current state and decide whether to tell
  // the element we're buffering or not.
  // NOTE(review): UpdateReadyState() runs before mState flips to
  // BUFFERING below; the dispatched runnable re-reads state on the main
  // thread, so the ordering appears deliberate — confirm before reordering.
  UpdateReadyState();
  mState = DECODER_STATE_BUFFERING;
  DECODER_LOG(PR_LOG_DEBUG, "Changed state from DECODING to BUFFERING, decoded for %.3lfs",
              decodeDuration.ToSeconds());
#ifdef PR_LOGGING
  // Log current playback vs. download throughput to aid diagnosing why
  // we entered buffering.
  MediaDecoder::Statistics stats = mDecoder->GetStatistics();
  DECODER_LOG(PR_LOG_DEBUG, "Playback rate: %.1lfKB/s%s download rate: %.1lfKB/s%s",
              stats.mPlaybackRate/1024, stats.mPlaybackRateReliable ? "" : " (unreliable)",
              stats.mDownloadRate/1024, stats.mDownloadRateReliable ? "" : " (unreliable)");
#endif
}
michael@0 | 2654 | |
michael@0 | 2655 | nsresult MediaDecoderStateMachine::GetBuffered(dom::TimeRanges* aBuffered) { |
michael@0 | 2656 | MediaResource* resource = mDecoder->GetResource(); |
michael@0 | 2657 | NS_ENSURE_TRUE(resource, NS_ERROR_FAILURE); |
michael@0 | 2658 | resource->Pin(); |
michael@0 | 2659 | nsresult res = mReader->GetBuffered(aBuffered, mStartTime); |
michael@0 | 2660 | resource->Unpin(); |
michael@0 | 2661 | return res; |
michael@0 | 2662 | } |
michael@0 | 2663 | |
// Runs a single state machine cycle on the state machine thread,
// guarding against re-entrant cycles with mInRunningStateMachine.
// Caller must hold the decoder monitor.
nsresult MediaDecoderStateMachine::CallRunStateMachine()
{
  AssertCurrentThreadInMonitor();
  NS_ASSERTION(OnStateMachineThread(), "Should be on state machine thread.");

  // If audio is being captured, stop the audio thread if it's running
  if (mAudioCaptured) {
    StopAudioThread();
  }

  MOZ_ASSERT(!mInRunningStateMachine, "State machine cycles must run in sequence!");
  // Clear the pending-timeout marker before running, so RunStateMachine()
  // can schedule a follow-up cycle without being coalesced away by a
  // stale deadline.
  mTimeout = TimeStamp();
  mInRunningStateMachine = true;
  nsresult res = RunStateMachine();
  mInRunningStateMachine = false;
  return res;
}
michael@0 | 2681 | |
michael@0 | 2682 | nsresult MediaDecoderStateMachine::TimeoutExpired(int aTimerId) |
michael@0 | 2683 | { |
michael@0 | 2684 | ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); |
michael@0 | 2685 | NS_ASSERTION(OnStateMachineThread(), "Must be on state machine thread"); |
michael@0 | 2686 | mTimer->Cancel(); |
michael@0 | 2687 | if (mTimerId == aTimerId) { |
michael@0 | 2688 | return CallRunStateMachine(); |
michael@0 | 2689 | } else { |
michael@0 | 2690 | return NS_OK; |
michael@0 | 2691 | } |
michael@0 | 2692 | } |
michael@0 | 2693 | |
// Takes the decoder monitor and pokes both decode-task dispatchers so
// audio/video decoding resumes if either side needs more data.
// NOTE(review): despite the name, this does not directly schedule a
// state machine cycle in the visible code — confirm against callers.
void MediaDecoderStateMachine::ScheduleStateMachineWithLockAndWakeDecoder() {
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
  DispatchAudioDecodeTaskIfNeeded();
  DispatchVideoDecodeTaskIfNeeded();
}
michael@0 | 2699 | |
michael@0 | 2700 | class TimerEvent : public nsITimerCallback, public nsRunnable { |
michael@0 | 2701 | NS_DECL_THREADSAFE_ISUPPORTS |
michael@0 | 2702 | public: |
michael@0 | 2703 | TimerEvent(MediaDecoderStateMachine* aStateMachine, int aTimerId) |
michael@0 | 2704 | : mStateMachine(aStateMachine), mTimerId(aTimerId) {} |
michael@0 | 2705 | |
michael@0 | 2706 | NS_IMETHOD Run() MOZ_OVERRIDE { |
michael@0 | 2707 | return mStateMachine->TimeoutExpired(mTimerId); |
michael@0 | 2708 | } |
michael@0 | 2709 | |
michael@0 | 2710 | NS_IMETHOD Notify(nsITimer* aTimer) { |
michael@0 | 2711 | return mStateMachine->TimeoutExpired(mTimerId); |
michael@0 | 2712 | } |
michael@0 | 2713 | private: |
michael@0 | 2714 | const nsRefPtr<MediaDecoderStateMachine> mStateMachine; |
michael@0 | 2715 | int mTimerId; |
michael@0 | 2716 | }; |
michael@0 | 2717 | |
michael@0 | 2718 | NS_IMPL_ISUPPORTS(TimerEvent, nsITimerCallback, nsIRunnable); |
michael@0 | 2719 | |
// Arranges for a state machine cycle to run in aUsecs microseconds:
// zero delay dispatches a runnable straight to the state machine thread,
// non-zero delay arms a one-shot timer (state machine thread only).
// Requests that would fire no sooner than an already-pending one are
// coalesced. Returns NS_ERROR_FAILURE after shutdown or on dispatch
// failure. Caller must hold the decoder monitor.
nsresult MediaDecoderStateMachine::ScheduleStateMachine(int64_t aUsecs) {
  AssertCurrentThreadInMonitor();
  NS_ABORT_IF_FALSE(GetStateMachineThread(),
    "Must have a state machine thread to schedule");

  if (mState == DECODER_STATE_SHUTDOWN) {
    return NS_ERROR_FAILURE;
  }
  // Negative delays are treated as "run as soon as possible".
  aUsecs = std::max<int64_t>(aUsecs, 0);

  TimeStamp timeout = TimeStamp::Now() + UsecsToDuration(aUsecs);
  if (!mTimeout.IsNull() && timeout >= mTimeout) {
    // We've already scheduled a timer set to expire at or before this time,
    // or have an event dispatched to run the state machine.
    return NS_OK;
  }

  // Convert to (truncated) 32-bit milliseconds for the timer API. For
  // real-time streams, cap the delay at 40ms so we keep up with live data.
  uint32_t ms = static_cast<uint32_t>((aUsecs / USECS_PER_MS) & 0xFFFFFFFF);
  if (mRealTime && ms > 40) {
    ms = 40;
  }

  // Don't cancel the timer here for this function will be called from
  // different threads.

  nsresult rv = NS_ERROR_FAILURE;
  // mTimerId+1 becomes the new "current" id once scheduling succeeds;
  // any earlier TimerEvent then reads as stale in TimeoutExpired().
  nsRefPtr<TimerEvent> event = new TimerEvent(this, mTimerId+1);

  if (ms == 0) {
    // Dispatch a runnable to the state machine thread when delay is 0.
    // It has less latency than dispatching a runnable to the state
    // machine thread which will then schedule a zero-delay timer.
    rv = GetStateMachineThread()->Dispatch(event, NS_DISPATCH_NORMAL);
  } else if (OnStateMachineThread()) {
    rv = mTimer->InitWithCallback(event, ms, nsITimer::TYPE_ONE_SHOT);
  } else {
    MOZ_ASSERT(false, "non-zero delay timer should be only scheduled in state machine thread");
  }

  if (NS_SUCCEEDED(rv)) {
    // Commit the new deadline and invalidate older pending events.
    mTimeout = timeout;
    ++mTimerId;
  } else {
    NS_WARNING("Failed to schedule state machine");
  }

  return rv;
}
michael@0 | 2768 | |
// True when the calling thread is currently running the decode task queue.
bool MediaDecoderStateMachine::OnDecodeThread() const
{
  return mDecodeTaskQueue->IsCurrentThreadIn();
}
michael@0 | 2773 | |
michael@0 | 2774 | bool MediaDecoderStateMachine::OnStateMachineThread() const |
michael@0 | 2775 | { |
michael@0 | 2776 | bool rv = false; |
michael@0 | 2777 | mStateMachineThreadPool->IsOnCurrentThread(&rv); |
michael@0 | 2778 | return rv; |
michael@0 | 2779 | } |
michael@0 | 2780 | |
// Returns the event target used to dispatch work to the state machine
// thread pool.
nsIEventTarget* MediaDecoderStateMachine::GetStateMachineThread()
{
  return mStateMachineThreadPool->GetEventTarget();
}
michael@0 | 2785 | |
michael@0 | 2786 | void MediaDecoderStateMachine::SetPlaybackRate(double aPlaybackRate) |
michael@0 | 2787 | { |
michael@0 | 2788 | NS_ASSERTION(NS_IsMainThread(), "Should be on main thread."); |
michael@0 | 2789 | NS_ASSERTION(aPlaybackRate != 0, |
michael@0 | 2790 | "PlaybackRate == 0 should be handled before this function."); |
michael@0 | 2791 | ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor()); |
michael@0 | 2792 | |
michael@0 | 2793 | if (mPlaybackRate == aPlaybackRate) { |
michael@0 | 2794 | return; |
michael@0 | 2795 | } |
michael@0 | 2796 | |
michael@0 | 2797 | // Get position of the last time we changed the rate. |
michael@0 | 2798 | if (!HasAudio()) { |
michael@0 | 2799 | // mBasePosition is a position in the video stream, not an absolute time. |
michael@0 | 2800 | if (mState == DECODER_STATE_SEEKING) { |
michael@0 | 2801 | mBasePosition = mSeekTarget.mTime - mStartTime; |
michael@0 | 2802 | } else { |
michael@0 | 2803 | mBasePosition = GetVideoStreamPosition(); |
michael@0 | 2804 | } |
michael@0 | 2805 | mPlayDuration = mBasePosition; |
michael@0 | 2806 | mResetPlayStartTime = true; |
michael@0 | 2807 | mPlayStartTime = TimeStamp::Now(); |
michael@0 | 2808 | } |
michael@0 | 2809 | |
michael@0 | 2810 | mPlaybackRate = aPlaybackRate; |
michael@0 | 2811 | } |
michael@0 | 2812 | |
// Main-thread setter for the preserves-pitch flag, recorded under the
// decoder monitor.
void MediaDecoderStateMachine::SetPreservesPitch(bool aPreservesPitch)
{
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());

  mPreservesPitch = aPreservesPitch;
}
michael@0 | 2820 | |
// Flags that decode-ahead (preroll) should be kept to a minimum until
// playback actually starts. Caller must hold the decoder monitor.
void
MediaDecoderStateMachine::SetMinimizePrerollUntilPlaybackStarts()
{
  AssertCurrentThreadInMonitor();
  mMinimizePreroll = true;
}
michael@0 | 2827 | |
// True once the state machine has reached its terminal SHUTDOWN state.
// Caller must hold the decoder monitor.
bool MediaDecoderStateMachine::IsShutdown()
{
  AssertCurrentThreadInMonitor();
  return GetState() == DECODER_STATE_SHUTDOWN;
}
michael@0 | 2833 | |
michael@0 | 2834 | void MediaDecoderStateMachine::QueueMetadata(int64_t aPublishTime, |
michael@0 | 2835 | int aChannels, |
michael@0 | 2836 | int aRate, |
michael@0 | 2837 | bool aHasAudio, |
michael@0 | 2838 | bool aHasVideo, |
michael@0 | 2839 | MetadataTags* aTags) |
michael@0 | 2840 | { |
michael@0 | 2841 | NS_ASSERTION(OnDecodeThread(), "Should be on decode thread."); |
michael@0 | 2842 | AssertCurrentThreadInMonitor(); |
michael@0 | 2843 | TimedMetadata* metadata = new TimedMetadata; |
michael@0 | 2844 | metadata->mPublishTime = aPublishTime; |
michael@0 | 2845 | metadata->mChannels = aChannels; |
michael@0 | 2846 | metadata->mRate = aRate; |
michael@0 | 2847 | metadata->mHasAudio = aHasAudio; |
michael@0 | 2848 | metadata->mHasVideo = aHasVideo; |
michael@0 | 2849 | metadata->mTags = aTags; |
michael@0 | 2850 | mMetadataManager.QueueMetadata(metadata); |
michael@0 | 2851 | } |
michael@0 | 2852 | |
michael@0 | 2853 | } // namespace mozilla |
michael@0 | 2854 | |
michael@0 | 2855 | // avoid redefined macro in unified build |
michael@0 | 2856 | #undef DECODER_LOG |
michael@0 | 2857 | #undef VERBOSE_LOG |