content/media/webm/WebMReader.cpp

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/content/media/webm/WebMReader.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1093 @@
     1.4 +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
     1.5 +/* vim:set ts=2 sw=2 sts=2 et cindent: */
     1.6 +/* This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +#include "nsError.h"
    1.10 +#include "MediaDecoderStateMachine.h"
    1.11 +#include "AbstractMediaDecoder.h"
    1.12 +#include "MediaResource.h"
    1.13 +#include "WebMReader.h"
    1.14 +#include "WebMBufferedParser.h"
    1.15 +#include "mozilla/dom/TimeRanges.h"
    1.16 +#include "VorbisUtils.h"
    1.17 +#include "gfx2DGlue.h"
    1.18 +
    1.19 +#include <algorithm>
    1.20 +
    1.21 +#define VPX_DONT_DEFINE_STDINT_TYPES
    1.22 +#include "vpx/vp8dx.h"
    1.23 +#include "vpx/vpx_decoder.h"
    1.24 +
    1.25 +#include "OggReader.h"
    1.26 +
    1.27 +using mozilla::NesteggPacketHolder;
    1.28 +
// Teach nsAutoRef/nsReturnRef (used by WebMReader::NextPacket) how to
// release a NesteggPacketHolder: a plain delete, which in turn releases
// the wrapped nestegg_packet. Must live outside namespace mozilla because
// nsAutoRefTraits is declared at global scope.
template <>
class nsAutoRefTraits<NesteggPacketHolder> : public nsPointerRefTraits<NesteggPacketHolder>
{
public:
  static void Release(NesteggPacketHolder* aHolder) { delete aHolder; }
};
    1.35 +
    1.36 +namespace mozilla {
    1.37 +
    1.38 +using namespace gfx;
    1.39 +using namespace layers;
    1.40 +
    1.41 +// Un-comment to enable logging of seek bisections.
    1.42 +//#define SEEK_LOGGING
    1.43 +
    1.44 +#ifdef PR_LOGGING
    1.45 +extern PRLogModuleInfo* gMediaDecoderLog;
    1.46 +PRLogModuleInfo* gNesteggLog;
    1.47 +#define LOG(type, msg) PR_LOG(gMediaDecoderLog, type, msg)
    1.48 +#ifdef SEEK_LOGGING
    1.49 +#define SEEK_LOG(type, msg) PR_LOG(gMediaDecoderLog, type, msg)
    1.50 +#else
    1.51 +#define SEEK_LOG(type, msg)
    1.52 +#endif
    1.53 +#else
    1.54 +#define LOG(type, msg)
    1.55 +#define SEEK_LOG(type, msg)
    1.56 +#endif
    1.57 +
    1.58 +static const unsigned NS_PER_USEC = 1000;
    1.59 +static const double NS_PER_S = 1e9;
    1.60 +
    1.61 +// Functions for reading and seeking using MediaResource required for
    1.62 +// nestegg_io. The 'user data' passed to these functions is the
    1.63 +// decoder from which the media resource is obtained.
    1.64 +static int webm_read(void *aBuffer, size_t aLength, void *aUserData)
    1.65 +{
    1.66 +  NS_ASSERTION(aUserData, "aUserData must point to a valid AbstractMediaDecoder");
    1.67 +  AbstractMediaDecoder* decoder = reinterpret_cast<AbstractMediaDecoder*>(aUserData);
    1.68 +  MediaResource* resource = decoder->GetResource();
    1.69 +  NS_ASSERTION(resource, "Decoder has no media resource");
    1.70 +
    1.71 +  nsresult rv = NS_OK;
    1.72 +  bool eof = false;
    1.73 +
    1.74 +  char *p = static_cast<char *>(aBuffer);
    1.75 +  while (NS_SUCCEEDED(rv) && aLength > 0) {
    1.76 +    uint32_t bytes = 0;
    1.77 +    rv = resource->Read(p, aLength, &bytes);
    1.78 +    if (bytes == 0) {
    1.79 +      eof = true;
    1.80 +      break;
    1.81 +    }
    1.82 +    aLength -= bytes;
    1.83 +    p += bytes;
    1.84 +  }
    1.85 +
    1.86 +  return NS_FAILED(rv) ? -1 : eof ? 0 : 1;
    1.87 +}
    1.88 +
    1.89 +static int webm_seek(int64_t aOffset, int aWhence, void *aUserData)
    1.90 +{
    1.91 +  NS_ASSERTION(aUserData, "aUserData must point to a valid AbstractMediaDecoder");
    1.92 +  AbstractMediaDecoder* decoder = reinterpret_cast<AbstractMediaDecoder*>(aUserData);
    1.93 +  MediaResource* resource = decoder->GetResource();
    1.94 +  NS_ASSERTION(resource, "Decoder has no media resource");
    1.95 +  nsresult rv = resource->Seek(aWhence, aOffset);
    1.96 +  return NS_SUCCEEDED(rv) ? 0 : -1;
    1.97 +}
    1.98 +
    1.99 +static int64_t webm_tell(void *aUserData)
   1.100 +{
   1.101 +  NS_ASSERTION(aUserData, "aUserData must point to a valid AbstractMediaDecoder");
   1.102 +  AbstractMediaDecoder* decoder = reinterpret_cast<AbstractMediaDecoder*>(aUserData);
   1.103 +  MediaResource* resource = decoder->GetResource();
   1.104 +  NS_ASSERTION(resource, "Decoder has no media resource");
   1.105 +  return resource->Tell();
   1.106 +}
   1.107 +
// nestegg logging callback. Maps nestegg's severity constants onto a
// three-letter tag and forwards the formatted message to the "Nestegg"
// log module. Compiles to an empty body when PR_LOGGING is disabled.
static void webm_log(nestegg * context,
                     unsigned int severity,
                     char const * format, ...)
{
#ifdef PR_LOGGING
  va_list args;
  char msg[256];
  const char * sevStr;

  // Translate NESTEGG_LOG_* severity into a short human-readable tag.
  switch(severity) {
    case NESTEGG_LOG_DEBUG:
      sevStr = "DBG";
      break;
    case NESTEGG_LOG_INFO:
      sevStr = "INF";
      break;
    case NESTEGG_LOG_WARNING:
      sevStr = "WRN";
      break;
    case NESTEGG_LOG_ERROR:
      sevStr = "ERR";
      break;
    case NESTEGG_LOG_CRITICAL:
      sevStr = "CRT";
      break;
    default:
      sevStr = "UNK";
      break;
  }

  va_start(args, format);

  // Prefix with the nestegg context pointer and severity, then append the
  // formatted message; both snprintf variants truncate safely at 256 bytes.
  PR_snprintf(msg, sizeof(msg), "%p [Nestegg-%s] ", context, sevStr);
  PR_vsnprintf(msg+strlen(msg), sizeof(msg)-strlen(msg), format, args);
  PR_LOG(gNesteggLog, PR_LOG_DEBUG, (msg));

  va_end(args);
#endif
}
   1.147 +
// WebMReader constructor. Members are set to "no track / no codec"
// sentinels (-1 / 0 / nullptr); the nestegg demuxer context itself is not
// created until ReadMetadata().
WebMReader::WebMReader(AbstractMediaDecoder* aDecoder)
  : MediaDecoderReader(aDecoder),
  mContext(nullptr),
  mPacketCount(0),
  mChannels(0),
#ifdef MOZ_OPUS
  mOpusParser(nullptr),
  mOpusDecoder(nullptr),
  mSkip(0),
  mSeekPreroll(0),
#endif
  mVideoTrack(0),
  mAudioTrack(0),
  mAudioStartUsec(-1),
  mAudioFrames(0),
  mAudioCodec(-1),
  mVideoCodec(-1),
  mHasVideo(false),
  mHasAudio(false)
{
  MOZ_COUNT_CTOR(WebMReader);
#ifdef PR_LOGGING
  // Lazily create the shared "Nestegg" log module on first construction.
  if (!gNesteggLog) {
    gNesteggLog = PR_NewLogModule("Nestegg");
  }
#endif
  // Zero these member vars to avoid crashes in VP8 destroy and Vorbis clear
  // functions when destructor is called before |Init|.
  memset(&mVPX, 0, sizeof(vpx_codec_ctx_t));
  memset(&mVorbisBlock, 0, sizeof(vorbis_block));
  memset(&mVorbisDsp, 0, sizeof(vorbis_dsp_state));
  memset(&mVorbisInfo, 0, sizeof(vorbis_info));
  memset(&mVorbisComment, 0, sizeof(vorbis_comment));
}
   1.182 +
// Destructor. Tears down the demuxer context, queued packets, and every
// codec state. Safe to run even if Init()/ReadMetadata() never completed,
// because the constructor memset the vpx/vorbis structs to zero.
WebMReader::~WebMReader()
{
  // Destroy the nestegg context before dropping packets/codecs.
  Cleanup();

  mVideoPackets.Reset();
  mAudioPackets.Reset();

  vpx_codec_destroy(&mVPX);

  vorbis_block_clear(&mVorbisBlock);
  vorbis_dsp_clear(&mVorbisDsp);
  vorbis_info_clear(&mVorbisInfo);
  vorbis_comment_clear(&mVorbisComment);

  if (mOpusDecoder) {
    opus_multistream_decoder_destroy(mOpusDecoder);
    mOpusDecoder = nullptr;
  }

  MOZ_COUNT_DTOR(WebMReader);
}
   1.204 +
   1.205 +nsresult WebMReader::Init(MediaDecoderReader* aCloneDonor)
   1.206 +{
   1.207 +
   1.208 +  vorbis_info_init(&mVorbisInfo);
   1.209 +  vorbis_comment_init(&mVorbisComment);
   1.210 +  memset(&mVorbisDsp, 0, sizeof(vorbis_dsp_state));
   1.211 +  memset(&mVorbisBlock, 0, sizeof(vorbis_block));
   1.212 +
   1.213 +  if (aCloneDonor) {
   1.214 +    mBufferedState = static_cast<WebMReader*>(aCloneDonor)->mBufferedState;
   1.215 +  } else {
   1.216 +    mBufferedState = new WebMBufferedState;
   1.217 +  }
   1.218 +
   1.219 +  return NS_OK;
   1.220 +}
   1.221 +
// Drops all queued, undecoded packets and resets per-codec decoder state
// so decoding can restart cleanly (e.g. across a seek). Returns
// NS_ERROR_FAILURE only if the base-class reset fails.
nsresult WebMReader::ResetDecode()
{
  mAudioFrames = 0;
  // -1 makes the next audio packet re-establish the decode start time.
  mAudioStartUsec = -1;
  nsresult res = NS_OK;
  if (NS_FAILED(MediaDecoderReader::ResetDecode())) {
    res = NS_ERROR_FAILURE;
  }

  if (mAudioCodec == NESTEGG_CODEC_VORBIS) {
    // Ignore failed results from vorbis_synthesis_restart. They
    // aren't fatal and it fails when ResetDecode is called at a
    // time when no vorbis data has been read.
    vorbis_synthesis_restart(&mVorbisDsp);
#ifdef MOZ_OPUS
  } else if (mAudioCodec == NESTEGG_CODEC_OPUS) {
    if (mOpusDecoder) {
      // Reset the decoder.
      opus_multistream_decoder_ctl(mOpusDecoder, OPUS_RESET_STATE);
      // The encoder pre-skip must be re-applied after a reset.
      mSkip = mOpusParser->mPreSkip;
    }
#endif
  }

  // Discard any demuxed-but-undecoded packets of both types.
  mVideoPackets.Reset();
  mAudioPackets.Reset();

  return res;
}
   1.251 +
   1.252 +void WebMReader::Cleanup()
   1.253 +{
   1.254 +  if (mContext) {
   1.255 +    nestegg_destroy(mContext);
   1.256 +    mContext = nullptr;
   1.257 +  }
   1.258 +}
   1.259 +
// Parses the WebM container headers: creates the nestegg demuxer over the
// decoder's MediaResource, reports the media duration, and configures at
// most one video track (VP8/VP9) and one audio track (Vorbis/Opus).
// aInfo receives the discovered track info; *aTags is always set to null
// (WebM tags are not extracted here). On any malformed header the nestegg
// context is destroyed via Cleanup() and NS_ERROR_FAILURE is returned.
nsresult WebMReader::ReadMetadata(MediaInfo* aInfo,
                                  MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  // Wire the nestegg I/O callbacks to read through our decoder's resource.
  nestegg_io io;
  io.read = webm_read;
  io.seek = webm_seek;
  io.tell = webm_tell;
  io.userdata = mDecoder;
  // NOTE(review): -1 presumably means "no parse-offset limit" to
  // nestegg_init — confirm against the nestegg API.
  int64_t maxOffset = -1;
  int r = nestegg_init(&mContext, io, &webm_log, maxOffset);
  if (r == -1) {
    return NS_ERROR_FAILURE;
  }

  // Duration is optional in WebM; only publish it when nestegg reports one.
  // nestegg durations are in nanoseconds; convert to microseconds.
  uint64_t duration = 0;
  r = nestegg_duration(mContext, &duration);
  if (r == 0) {
    ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
    mDecoder->SetMediaDuration(duration / NS_PER_USEC);
  }

  unsigned int ntracks = 0;
  r = nestegg_track_count(mContext, &ntracks);
  if (r == -1) {
    Cleanup();
    return NS_ERROR_FAILURE;
  }

  // Scan all tracks, latching onto the first usable video track and the
  // first usable audio track.
  for (uint32_t track = 0; track < ntracks; ++track) {
    int id = nestegg_track_codec_id(mContext, track);
    if (id == -1) {
      Cleanup();
      return NS_ERROR_FAILURE;
    }
    int type = nestegg_track_type(mContext, track);
    if (!mHasVideo && type == NESTEGG_TRACK_VIDEO) {
      nestegg_video_params params;
      r = nestegg_track_video_params(mContext, track, &params);
      if (r == -1) {
        Cleanup();
        return NS_ERROR_FAILURE;
      }

      // Pick the libvpx decoder interface matching the container codec id.
      vpx_codec_iface_t* dx = nullptr;
      mVideoCodec = nestegg_track_codec_id(mContext, track);
      if (mVideoCodec == NESTEGG_CODEC_VP8) {
        dx = vpx_codec_vp8_dx();
      } else if (mVideoCodec == NESTEGG_CODEC_VP9) {
        dx = vpx_codec_vp9_dx();
      }
      if (!dx || vpx_codec_dec_init(&mVPX, dx, nullptr, 0)) {
        Cleanup();
        return NS_ERROR_FAILURE;
      }

      // Picture region, taking into account cropping, before scaling
      // to the display size.
      nsIntRect pictureRect(params.crop_left,
                            params.crop_top,
                            params.width - (params.crop_right + params.crop_left),
                            params.height - (params.crop_bottom + params.crop_top));

      // If the cropping data appears invalid then use the frame data
      if (pictureRect.width <= 0 ||
          pictureRect.height <= 0 ||
          pictureRect.x < 0 ||
          pictureRect.y < 0)
      {
        pictureRect.x = 0;
        pictureRect.y = 0;
        pictureRect.width = params.width;
        pictureRect.height = params.height;
      }

      // Validate the container-reported frame and pictureRect sizes. This ensures
      // that our video frame creation code doesn't overflow.
      nsIntSize displaySize(params.display_width, params.display_height);
      nsIntSize frameSize(params.width, params.height);
      if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) {
        // Video track's frame sizes will overflow. Ignore the video track.
        // NOTE(review): mVPX was already initialized above; a later valid
        // video track re-inits it without vpx_codec_destroy — confirm
        // whether that leaks decoder state.
        continue;
      }

      mVideoTrack = track;
      mHasVideo = true;
      mInfo.mVideo.mHasVideo = true;

      mInfo.mVideo.mDisplay = displaySize;
      mPicture = pictureRect;
      mInitialFrame = frameSize;

      // Map the container stereo-3D mode onto the layers StereoMode enum.
      switch (params.stereo_mode) {
      case NESTEGG_VIDEO_MONO:
        mInfo.mVideo.mStereoMode = StereoMode::MONO;
        break;
      case NESTEGG_VIDEO_STEREO_LEFT_RIGHT:
        mInfo.mVideo.mStereoMode = StereoMode::LEFT_RIGHT;
        break;
      case NESTEGG_VIDEO_STEREO_BOTTOM_TOP:
        mInfo.mVideo.mStereoMode = StereoMode::BOTTOM_TOP;
        break;
      case NESTEGG_VIDEO_STEREO_TOP_BOTTOM:
        mInfo.mVideo.mStereoMode = StereoMode::TOP_BOTTOM;
        break;
      case NESTEGG_VIDEO_STEREO_RIGHT_LEFT:
        mInfo.mVideo.mStereoMode = StereoMode::RIGHT_LEFT;
        break;
      }
    }
    else if (!mHasAudio && type == NESTEGG_TRACK_AUDIO) {
      nestegg_audio_params params;
      r = nestegg_track_audio_params(mContext, track, &params);
      if (r == -1) {
        Cleanup();
        return NS_ERROR_FAILURE;
      }

      mAudioTrack = track;
      mHasAudio = true;
      mInfo.mAudio.mHasAudio = true;
      mAudioCodec = nestegg_track_codec_id(mContext, track);
      // codec_delay is in nanoseconds; store as microseconds.
      mCodecDelay = params.codec_delay / NS_PER_USEC;

      if (mAudioCodec == NESTEGG_CODEC_VORBIS) {
        // Get the Vorbis header data. Vorbis requires exactly three
        // header packets: identification, comment, setup.
        unsigned int nheaders = 0;
        r = nestegg_track_codec_data_count(mContext, track, &nheaders);
        if (r == -1 || nheaders != 3) {
          Cleanup();
          return NS_ERROR_FAILURE;
        }

        for (uint32_t header = 0; header < nheaders; ++header) {
          unsigned char* data = 0;
          size_t length = 0;

          r = nestegg_track_codec_data(mContext, track, header, &data, &length);
          if (r == -1) {
            Cleanup();
            return NS_ERROR_FAILURE;
          }
          // Only the first header packet carries the beginning-of-stream
          // flag.
          ogg_packet opacket = InitOggPacket(data, length, header == 0, false, 0);

          r = vorbis_synthesis_headerin(&mVorbisInfo,
                                        &mVorbisComment,
                                        &opacket);
          if (r != 0) {
            Cleanup();
            return NS_ERROR_FAILURE;
          }
        }

        r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
        if (r != 0) {
          Cleanup();
          return NS_ERROR_FAILURE;
        }

        r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
        if (r != 0) {
          Cleanup();
          return NS_ERROR_FAILURE;
        }

        mInfo.mAudio.mRate = mVorbisDsp.vi->rate;
        mInfo.mAudio.mChannels = mVorbisDsp.vi->channels;
        mChannels = mInfo.mAudio.mChannels;
#ifdef MOZ_OPUS
      } else if (mAudioCodec == NESTEGG_CODEC_OPUS) {
        // Opus keeps all header info in a single codec-private blob.
        unsigned char* data = 0;
        size_t length = 0;
        r = nestegg_track_codec_data(mContext, track, 0, &data, &length);
        if (r == -1) {
          Cleanup();
          return NS_ERROR_FAILURE;
        }

        mOpusParser = new OpusParser;
        if (!mOpusParser->DecodeHeader(data, length)) {
          Cleanup();
          return NS_ERROR_FAILURE;
        }

        if (!InitOpusDecoder()) {
          Cleanup();
          return NS_ERROR_FAILURE;
        }

        // The container-level CodecDelay must agree with the pre-skip in
        // the Opus header; a mismatch means a broken muxer.
        if (static_cast<int64_t>(mCodecDelay) != FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate).value()) {
          LOG(PR_LOG_WARNING,
              ("Invalid Opus header: CodecDelay and pre-skip do not match!\n"));
          Cleanup();
          return NS_ERROR_FAILURE;
        }

        mInfo.mAudio.mRate = mOpusParser->mRate;

        mInfo.mAudio.mChannels = mOpusParser->mChannels;
        mChannels = mInfo.mAudio.mChannels;
        mSeekPreroll = params.seek_preroll;
#endif
      } else {
        // Unsupported audio codec.
        Cleanup();
        return NS_ERROR_FAILURE;
      }
    }
  }

  // We can't seek in buffered regions if we have no cues.
  mDecoder->SetMediaSeekable(nestegg_has_cues(mContext) == 1);

  *aInfo = mInfo;

  *aTags = nullptr;

  return NS_OK;
}
   1.479 +
   1.480 +#ifdef MOZ_OPUS
   1.481 +bool WebMReader::InitOpusDecoder()
   1.482 +{
   1.483 +  int r;
   1.484 +
   1.485 +  NS_ASSERTION(mOpusDecoder == nullptr, "leaking OpusDecoder");
   1.486 +
   1.487 +  mOpusDecoder = opus_multistream_decoder_create(mOpusParser->mRate,
   1.488 +                                             mOpusParser->mChannels,
   1.489 +                                             mOpusParser->mStreams,
   1.490 +                                             mOpusParser->mCoupledStreams,
   1.491 +                                             mOpusParser->mMappingTable,
   1.492 +                                             &r);
   1.493 +  mSkip = mOpusParser->mPreSkip;
   1.494 +
   1.495 +  return r == OPUS_OK;
   1.496 +}
   1.497 +#endif
   1.498 +
   1.499 +ogg_packet WebMReader::InitOggPacket(unsigned char* aData,
   1.500 +                                       size_t aLength,
   1.501 +                                       bool aBOS,
   1.502 +                                       bool aEOS,
   1.503 +                                       int64_t aGranulepos)
   1.504 +{
   1.505 +  ogg_packet packet;
   1.506 +  packet.packet = aData;
   1.507 +  packet.bytes = aLength;
   1.508 +  packet.b_o_s = aBOS;
   1.509 +  packet.e_o_s = aEOS;
   1.510 +  packet.granulepos = aGranulepos;
   1.511 +  packet.packetno = mPacketCount++;
   1.512 +  return packet;
   1.513 +}
   1.514 + 
// Decodes one demuxed audio packet (Vorbis or Opus) and pushes the
// resulting AudioData chunks onto the audio queue.
// aPacket: the nestegg packet (may contain several data chunks).
// aOffset: stream byte offset recorded on the produced AudioData.
// Returns false on any demux/decode/overflow error; true otherwise,
// including when an Opus packet is wholly consumed by pre-skip or
// discard padding.
bool WebMReader::DecodeAudioPacket(nestegg_packet* aPacket, int64_t aOffset)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  int r = 0;
  unsigned int count = 0;
  r = nestegg_packet_count(aPacket, &count);
  if (r == -1) {
    return false;
  }

  uint64_t tstamp = 0;
  r = nestegg_packet_tstamp(aPacket, &tstamp);
  if (r == -1) {
    return false;
  }

  const uint32_t rate = mInfo.mAudio.mRate;
  // nestegg timestamps are nanoseconds; convert to microseconds.
  uint64_t tstamp_usecs = tstamp / NS_PER_USEC;
  if (mAudioStartUsec == -1) {
    // This is the first audio chunk. Assume the start time of our decode
    // is the start of this chunk.
    mAudioStartUsec = tstamp_usecs;
  }
  // If there's a gap between the start of this audio chunk and the end of
  // the previous audio chunk, we need to increment the packet count so that
  // the vorbis decode doesn't use data from before the gap to help decode
  // from after the gap.
  CheckedInt64 tstamp_frames = UsecsToFrames(tstamp_usecs, rate);
  CheckedInt64 decoded_frames = UsecsToFrames(mAudioStartUsec, rate);
  if (!tstamp_frames.isValid() || !decoded_frames.isValid()) {
    NS_WARNING("Int overflow converting WebM times to frames");
    return false;
  }
  decoded_frames += mAudioFrames;
  if (!decoded_frames.isValid()) {
    NS_WARNING("Int overflow adding decoded_frames");
    return false;
  }
  if (tstamp_frames.value() > decoded_frames.value()) {
#ifdef DEBUG
    CheckedInt64 usecs = FramesToUsecs(tstamp_frames.value() - decoded_frames.value(), rate);
    LOG(PR_LOG_DEBUG, ("WebMReader detected gap of %lld, %lld frames, in audio stream\n",
      usecs.isValid() ? usecs.value() : -1,
      tstamp_frames.value() - decoded_frames.value()));
#endif
    // Gap detected: restart frame accounting from this chunk's timestamp.
    mPacketCount++;
    mAudioStartUsec = tstamp_usecs;
    mAudioFrames = 0;
  }

  int32_t total_frames = 0;
  for (uint32_t i = 0; i < count; ++i) {
    unsigned char* data;
    size_t length;
    r = nestegg_packet_data(aPacket, i, &data, &length);
    if (r == -1) {
      return false;
    }
    if (mAudioCodec == NESTEGG_CODEC_VORBIS) {
      ogg_packet opacket = InitOggPacket(data, length, false, false, -1);

      if (vorbis_synthesis(&mVorbisBlock, &opacket) != 0) {
        return false;
      }

      if (vorbis_synthesis_blockin(&mVorbisDsp,
                                   &mVorbisBlock) != 0) {
        return false;
      }

      // Drain all PCM produced by this block, interleaving the planar
      // vorbis channels into |buffer|.
      VorbisPCMValue** pcm = 0;
      int32_t frames = 0;
      while ((frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm)) > 0) {
        nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frames * mChannels]);
        for (uint32_t j = 0; j < mChannels; ++j) {
          VorbisPCMValue* channel = pcm[j];
          // NOTE(review): this inner |i| shadows the chunk-loop index
          // above; behavior is correct but easy to misread.
          for (uint32_t i = 0; i < uint32_t(frames); ++i) {
            buffer[i*mChannels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
          }
        }

        CheckedInt64 duration = FramesToUsecs(frames, rate);
        if (!duration.isValid()) {
          NS_WARNING("Int overflow converting WebM audio duration");
          return false;
        }
        CheckedInt64 total_duration = FramesToUsecs(total_frames, rate);
        if (!total_duration.isValid()) {
          NS_WARNING("Int overflow converting WebM audio total_duration");
          return false;
        }

        CheckedInt64 time = total_duration + tstamp_usecs;
        if (!time.isValid()) {
          NS_WARNING("Int overflow adding total_duration and tstamp_usecs");
          return false;
        };

        total_frames += frames;
        AudioQueue().Push(new AudioData(aOffset,
                                       time.value(),
                                       duration.value(),
                                       frames,
                                       buffer.forget(),
                                       mChannels));
        mAudioFrames += frames;
        // Tell libvorbis how many frames we consumed from its buffer.
        if (vorbis_synthesis_read(&mVorbisDsp, frames) != 0) {
          return false;
        }
      }
    } else if (mAudioCodec == NESTEGG_CODEC_OPUS) {
#ifdef MOZ_OPUS
      uint32_t channels = mOpusParser->mChannels;

      // Maximum value is 63*2880, so there's no chance of overflow.
      int32_t frames_number = opus_packet_get_nb_frames(data, length);

      if (frames_number <= 0)
        return false; // Invalid packet header.
      int32_t samples = opus_packet_get_samples_per_frame(data,
                                                          (opus_int32) rate);
      int32_t frames = frames_number*samples;

      // A valid Opus packet must be between 2.5 and 120 ms long.
      // (120 and 5760 frames respectively at 48 kHz.)
      if (frames < 120 || frames > 5760)
        return false;
      nsAutoArrayPtr<AudioDataValue> buffer(new AudioDataValue[frames * channels]);

      // Decode to the appropriate sample type.
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
      int ret = opus_multistream_decode_float(mOpusDecoder,
                                              data, length,
                                              buffer, frames, false);
#else
      int ret = opus_multistream_decode(mOpusDecoder,
                                        data, length,
                                        buffer, frames, false);
#endif
      if (ret < 0)
        return false;
      NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
      CheckedInt64 startTime = tstamp_usecs;

      // Trim the initial frames while the decoder is settling.
      if (mSkip > 0) {
        int32_t skipFrames = std::min(mSkip, frames);
        if (skipFrames == frames) {
          // discard the whole packet
          mSkip -= frames;
          LOG(PR_LOG_DEBUG, ("Opus decoder skipping %d frames"
                             " (whole packet)", frames));
          return true;
        }
        int32_t keepFrames = frames - skipFrames;
        if (keepFrames < 0) {
          NS_WARNING("Int overflow in keepFrames");
          return false;
        }
        int samples = keepFrames * channels;
        if (samples < 0) {
          NS_WARNING("Int overflow in samples");
          return false;
        }
        // Copy the post-skip tail into a fresh buffer and advance the
        // start time past the skipped frames.
        nsAutoArrayPtr<AudioDataValue> trimBuffer(new AudioDataValue[samples]);
        for (int i = 0; i < samples; i++)
          trimBuffer[i] = buffer[skipFrames*channels + i];
        startTime = startTime + FramesToUsecs(skipFrames, rate);
        frames = keepFrames;
        buffer = trimBuffer;

        mSkip -= skipFrames;
        LOG(PR_LOG_DEBUG, ("Opus decoder skipping %d frames", skipFrames));
      }

      // DiscardPadding trims frames from the *end* of the final packet.
      // NOTE(review): the return value of nestegg_packet_discard_padding
      // is assigned to |r| but never checked — confirm whether failure
      // can leave discardPadding uninitialized-looking (it is zeroed
      // above, so likely benign).
      int64_t discardPadding = 0;
      r = nestegg_packet_discard_padding(aPacket, &discardPadding);
      if (discardPadding > 0) {
        // discardPadding is in nanoseconds; convert to a frame count.
        CheckedInt64 discardFrames = UsecsToFrames(discardPadding * NS_PER_USEC, rate);
        if (!discardFrames.isValid()) {
          NS_WARNING("Int overflow in DiscardPadding");
          return false;
        }
        int32_t keepFrames = frames - discardFrames.value();
        if (keepFrames > 0) {
          int samples = keepFrames * channels;
          if (samples < 0) {
            NS_WARNING("Int overflow in samples");
            return false;
          }
          nsAutoArrayPtr<AudioDataValue> trimBuffer(new AudioDataValue[samples]);
          for (int i = 0; i < samples; i++)
            trimBuffer[i] = buffer[i];
          frames = keepFrames;
          buffer = trimBuffer;
        } else {
          LOG(PR_LOG_DEBUG, ("Opus decoder discarding whole packet"
                             " ( %d frames) as padding", frames));
          return true;
        }
      }

      // Apply the header gain if one was specified.
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
      if (mOpusParser->mGain != 1.0f) {
        float gain = mOpusParser->mGain;
        int samples = frames * channels;
        for (int i = 0; i < samples; i++) {
          buffer[i] *= gain;
        }
      }
#else
      // Integer samples: apply the Q16 fixed-point gain with rounding,
      // clipping to the 16-bit range.
      if (mOpusParser->mGain_Q16 != 65536) {
        int64_t gain_Q16 = mOpusParser->mGain_Q16;
        int samples = frames * channels;
        for (int i = 0; i < samples; i++) {
          int32_t val = static_cast<int32_t>((gain_Q16*buffer[i] + 32768)>>16);
          buffer[i] = static_cast<AudioDataValue>(MOZ_CLIP_TO_15(val));
        }
      }
#endif

      // No channel mapping for more than 8 channels.
      if (channels > 8) {
        return false;
      }

      CheckedInt64 duration = FramesToUsecs(frames, rate);
      if (!duration.isValid()) {
        NS_WARNING("Int overflow converting WebM audio duration");
        return false;
      }
      // Shift the timestamp back by the codec delay so output is aligned
      // with the container timeline.
      CheckedInt64 time = startTime - mCodecDelay;
      if (!time.isValid()) {
        NS_WARNING("Int overflow shifting tstamp by codec delay");
        return false;
      };
      AudioQueue().Push(new AudioData(mDecoder->GetResource()->Tell(),
                                     time.value(),
                                     duration.value(),
                                     frames,
                                     buffer.forget(),
                                     mChannels));

      mAudioFrames += frames;
#else
      return false;
#endif /* MOZ_OPUS */
    }
  }

  return true;
}
   1.768 +
// Returns the next demuxed packet for aTrackType, transferring ownership
// to the caller. Packets already queued for that type are served first;
// otherwise packets are read from the demuxer, with packets belonging to
// the other active track buffered on its queue for later. Returns a null
// ref on demux error or end of stream.
nsReturnRef<NesteggPacketHolder> WebMReader::NextPacket(TrackType aTrackType)
{
  // The packet queue that packets will be pushed on if they
  // are not the type we are interested in.
  WebMPacketQueue& otherPackets =
    aTrackType == VIDEO ? mAudioPackets : mVideoPackets;

  // The packet queue for the type that we are interested in.
  WebMPacketQueue &packets =
    aTrackType == VIDEO ? mVideoPackets : mAudioPackets;

  // Flag to indicate that we do need to playback these types of
  // packets.
  bool hasType = aTrackType == VIDEO ? mHasVideo : mHasAudio;

  // Flag to indicate that we do need to playback the other type
  // of track.
  bool hasOtherType = aTrackType == VIDEO ? mHasAudio : mHasVideo;

  // Track we are interested in
  uint32_t ourTrack = aTrackType == VIDEO ? mVideoTrack : mAudioTrack;

  // Value of other track
  uint32_t otherTrack = aTrackType == VIDEO ? mAudioTrack : mVideoTrack;

  nsAutoRef<NesteggPacketHolder> holder;

  if (packets.GetSize() > 0) {
    holder.own(packets.PopFront());
  } else {
    // Keep reading packets until we find a packet
    // for the track we want.
    do {
      nestegg_packet* packet;
      int r = nestegg_read_packet(mContext, &packet);
      if (r <= 0) {
        // Error or end of stream: return a null ref.
        return nsReturnRef<NesteggPacketHolder>();
      }
      // Record the current resource offset with the packet so buffered
      // ranges can be attributed to it later.
      int64_t offset = mDecoder->GetResource()->Tell();
      // holder takes ownership; it frees the packet on every early exit.
      holder.own(new NesteggPacketHolder(packet, offset));

      unsigned int track = 0;
      r = nestegg_packet_track(packet, &track);
      if (r == -1) {
        return nsReturnRef<NesteggPacketHolder>();
      }

      if (hasOtherType && otherTrack == track) {
        // Save the packet for when we want these packets
        otherPackets.Push(holder.disown());
        continue;
      }

      // The packet is for the track we want to play
      if (hasType && ourTrack == track) {
        break;
      }
      // Otherwise the packet belongs to a track we ignore; holder frees
      // it on the next own() or loop exit.
    } while (true);
  }

  return holder.out();
}
   1.831 +
   1.832 +bool WebMReader::DecodeAudioData()
   1.833 +{
   1.834 +  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
   1.835 +
   1.836 +  nsAutoRef<NesteggPacketHolder> holder(NextPacket(AUDIO));
   1.837 +  if (!holder) {
   1.838 +    return false;
   1.839 +  }
   1.840 +
   1.841 +  return DecodeAudioPacket(holder->mPacket, holder->mOffset);
   1.842 +}
   1.843 +
// Demux and decode one video packet, pushing any resulting frames onto the
// video queue. aKeyframeSkip: in/out — while true, non-keyframe chunks are
// skipped; cleared once a keyframe at/after aTimeThreshold is reached.
// aTimeThreshold: frames timestamped before this (usecs) are decoded but not
// queued. Returns false on end of stream or any demux/decode error.
bool WebMReader::DecodeVideoFrame(bool &aKeyframeSkip,
                                      int64_t aTimeThreshold)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");

  // Record number of frames decoded and parsed. Automatically update the
  // stats counters using the AutoNotifyDecoded stack-based class.
  uint32_t parsed = 0, decoded = 0;
  AbstractMediaDecoder::AutoNotifyDecoded autoNotify(mDecoder, parsed, decoded);

  nsAutoRef<NesteggPacketHolder> holder(NextPacket(VIDEO));
  if (!holder) {
    return false;
  }

  nestegg_packet* packet = holder->mPacket;
  unsigned int track = 0;
  int r = nestegg_packet_track(packet, &track);
  if (r == -1) {
    return false;
  }

  // Number of data chunks (frames) carried by this packet.
  unsigned int count = 0;
  r = nestegg_packet_count(packet, &count);
  if (r == -1) {
    return false;
  }

  // Packet timestamp in nanoseconds.
  uint64_t tstamp = 0;
  r = nestegg_packet_tstamp(packet, &tstamp);
  if (r == -1) {
    return false;
  }

  // The end time of this frame is the start time of the next frame.  Fetch
  // the timestamp of the next packet for this track.  If we've reached the
  // end of the resource, use the file's duration as the end time of this
  // video frame.
  uint64_t next_tstamp = 0;
  nsAutoRef<NesteggPacketHolder> next_holder(NextPacket(VIDEO));
  if (next_holder) {
    r = nestegg_packet_tstamp(next_holder->mPacket, &next_tstamp);
    if (r == -1) {
      return false;
    }
    // Put the peeked packet back so the next DecodeVideoFrame call gets it.
    PushVideoPacket(next_holder.disown());
  } else {
    // No next packet: fall back to the media end time (usecs), converted to
    // nanoseconds to match nestegg timestamps.
    ReentrantMonitorAutoEnter decoderMon(mDecoder->GetReentrantMonitor());
    int64_t endTime = mDecoder->GetEndMediaTime();
    if (endTime == -1) {
      return false;
    }
    next_tstamp = endTime * NS_PER_USEC;
  }

  // Convert the packet timestamp from nanoseconds to microseconds.
  int64_t tstamp_usecs = tstamp / NS_PER_USEC;
  for (uint32_t i = 0; i < count; ++i) {
    unsigned char* data;
    size_t length;
    r = nestegg_packet_data(packet, i, &data, &length);
    if (r == -1) {
      return false;
    }

    // Peek at the bitstream (without decoding) to learn whether this chunk
    // is a keyframe, for the keyframe-skip logic below.
    vpx_codec_stream_info_t si;
    memset(&si, 0, sizeof(si));
    si.sz = sizeof(si);
    if (mVideoCodec == NESTEGG_CODEC_VP8) {
      vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, length, &si);
    } else if (mVideoCodec == NESTEGG_CODEC_VP9) {
      vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), data, length, &si);
    }
    if (aKeyframeSkip && (!si.is_kf || tstamp_usecs < aTimeThreshold)) {
      // Skipping to next keyframe...
      parsed++; // Assume 1 frame per chunk.
      continue;
    }

    if (aKeyframeSkip && si.is_kf) {
      // Reached a usable keyframe; resume normal decoding.
      aKeyframeSkip = false;
    }

    // Non-zero return from vpx_codec_decode indicates a decode error.
    if (vpx_codec_decode(&mVPX, data, length, nullptr, 0)) {
      return false;
    }

    // If the timestamp of the video frame is less than
    // the time threshold required then it is not added
    // to the video queue and won't be displayed.
    if (tstamp_usecs < aTimeThreshold) {
      parsed++; // Assume 1 frame per chunk.
      continue;
    }

    vpx_codec_iter_t  iter = nullptr;
    vpx_image_t      *img;

    // Drain all frames the decoder produced for this chunk.
    while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
      NS_ASSERTION(img->fmt == IMG_FMT_I420, "WebM image format is not I420");

      // Chroma shifts are rounded down as per the decoding examples in the VP8 SDK
      VideoData::YCbCrBuffer b;
      b.mPlanes[0].mData = img->planes[0];
      b.mPlanes[0].mStride = img->stride[0];
      b.mPlanes[0].mHeight = img->d_h;
      b.mPlanes[0].mWidth = img->d_w;
      b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

      b.mPlanes[1].mData = img->planes[1];
      b.mPlanes[1].mStride = img->stride[1];
      b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
      b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

      b.mPlanes[2].mData = img->planes[2];
      b.mPlanes[2].mStride = img->stride[2];
      b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
      b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

      IntRect picture = ToIntRect(mPicture);
      if (img->d_w != static_cast<uint32_t>(mInitialFrame.width) ||
          img->d_h != static_cast<uint32_t>(mInitialFrame.height)) {
        // Frame size is different from what the container reports. This is legal
        // in WebM, and we will preserve the ratio of the crop rectangle as it
        // was reported relative to the picture size reported by the container.
        picture.x = (mPicture.x * img->d_w) / mInitialFrame.width;
        picture.y = (mPicture.y * img->d_h) / mInitialFrame.height;
        picture.width = (img->d_w * mPicture.width) / mInitialFrame.width;
        picture.height = (img->d_h * mPicture.height) / mInitialFrame.height;
      }

      // Duration is the gap to the next packet's timestamp, in usecs.
      VideoData *v = VideoData::Create(mInfo.mVideo,
                                       mDecoder->GetImageContainer(),
                                       holder->mOffset,
                                       tstamp_usecs,
                                       (next_tstamp / NS_PER_USEC) - tstamp_usecs,
                                       b,
                                       si.is_kf,
                                       -1,
                                       picture);
      if (!v) {
        return false;
      }
      parsed++;
      decoded++;
      NS_ASSERTION(decoded <= parsed,
        "Expect only 1 frame per chunk per packet in WebM...");
      VideoQueue().Push(v);
    }
  }

  return true;
}
   1.998 +
   1.999 +void
  1.1000 +WebMReader::PushVideoPacket(NesteggPacketHolder* aItem)
  1.1001 +{
  1.1002 +    mVideoPackets.PushFront(aItem);
  1.1003 +}
  1.1004 +
  1.1005 +nsresult WebMReader::Seek(int64_t aTarget, int64_t aStartTime, int64_t aEndTime,
  1.1006 +                            int64_t aCurrentTime)
  1.1007 +{
  1.1008 +  NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
  1.1009 +
  1.1010 +  LOG(PR_LOG_DEBUG, ("Reader [%p] for Decoder [%p]: About to seek to %fs",
  1.1011 +                     this, mDecoder, aTarget/1000000.0));
  1.1012 +  if (NS_FAILED(ResetDecode())) {
  1.1013 +    return NS_ERROR_FAILURE;
  1.1014 +  }
  1.1015 +  uint32_t trackToSeek = mHasVideo ? mVideoTrack : mAudioTrack;
  1.1016 +  uint64_t target = aTarget * NS_PER_USEC;
  1.1017 +  if (mSeekPreroll) {
  1.1018 +    target = std::max(static_cast<uint64_t>(aStartTime * NS_PER_USEC), target - mSeekPreroll);
  1.1019 +  }
  1.1020 +  int r = nestegg_track_seek(mContext, trackToSeek, target);
  1.1021 +  if (r != 0) {
  1.1022 +    // Try seeking directly based on cluster information in memory.
  1.1023 +    int64_t offset = 0;
  1.1024 +    bool rv = mBufferedState->GetOffsetForTime((aTarget - aStartTime)/NS_PER_USEC, &offset);
  1.1025 +    if (!rv) {
  1.1026 +      return NS_ERROR_FAILURE;
  1.1027 +    }
  1.1028 +
  1.1029 +    r = nestegg_offset_seek(mContext, offset);
  1.1030 +    if (r != 0) {
  1.1031 +      return NS_ERROR_FAILURE;
  1.1032 +    }
  1.1033 +  }
  1.1034 +  return NS_OK;
  1.1035 +}
  1.1036 +
  1.1037 +nsresult WebMReader::GetBuffered(dom::TimeRanges* aBuffered, int64_t aStartTime)
  1.1038 +{
  1.1039 +  MediaResource* resource = mDecoder->GetResource();
  1.1040 +
  1.1041 +  uint64_t timecodeScale;
  1.1042 +  if (!mContext || nestegg_tstamp_scale(mContext, &timecodeScale) == -1) {
  1.1043 +    return NS_OK;
  1.1044 +  }
  1.1045 +
  1.1046 +  // Special case completely cached files.  This also handles local files.
  1.1047 +  bool isFullyCached = resource->IsDataCachedToEndOfResource(0);
  1.1048 +  if (isFullyCached) {
  1.1049 +    uint64_t duration = 0;
  1.1050 +    if (nestegg_duration(mContext, &duration) == 0) {
  1.1051 +      aBuffered->Add(0, duration / NS_PER_S);
  1.1052 +    }
  1.1053 +  }
  1.1054 +
  1.1055 +  uint32_t bufferedLength = 0;
  1.1056 +  aBuffered->GetLength(&bufferedLength);
  1.1057 +
  1.1058 +  // Either we the file is not fully cached, or we couldn't find a duration in
  1.1059 +  // the WebM bitstream.
  1.1060 +  if (!isFullyCached || !bufferedLength) {
  1.1061 +    MediaResource* resource = mDecoder->GetResource();
  1.1062 +    nsTArray<MediaByteRange> ranges;
  1.1063 +    nsresult res = resource->GetCachedRanges(ranges);
  1.1064 +    NS_ENSURE_SUCCESS(res, res);
  1.1065 +
  1.1066 +    for (uint32_t index = 0; index < ranges.Length(); index++) {
  1.1067 +      uint64_t start, end;
  1.1068 +      bool rv = mBufferedState->CalculateBufferedForRange(ranges[index].mStart,
  1.1069 +                                                          ranges[index].mEnd,
  1.1070 +                                                          &start, &end);
  1.1071 +      if (rv) {
  1.1072 +        double startTime = start * timecodeScale / NS_PER_S - aStartTime;
  1.1073 +        double endTime = end * timecodeScale / NS_PER_S - aStartTime;
  1.1074 +        // If this range extends to the end of the file, the true end time
  1.1075 +        // is the file's duration.
  1.1076 +        if (resource->IsDataCachedToEndOfResource(ranges[index].mStart)) {
  1.1077 +          uint64_t duration = 0;
  1.1078 +          if (nestegg_duration(mContext, &duration) == 0) {
  1.1079 +            endTime = duration / NS_PER_S;
  1.1080 +          }
  1.1081 +        }
  1.1082 +
  1.1083 +        aBuffered->Add(startTime, endTime);
  1.1084 +      }
  1.1085 +    }
  1.1086 +  }
  1.1087 +
  1.1088 +  return NS_OK;
  1.1089 +}
  1.1090 +
  1.1091 +void WebMReader::NotifyDataArrived(const char* aBuffer, uint32_t aLength, int64_t aOffset)
  1.1092 +{
  1.1093 +  mBufferedState->NotifyDataArrived(aBuffer, aLength, aOffset);
  1.1094 +}
  1.1095 +
  1.1096 +} // namespace mozilla

mercurial