Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory storage according to privacy.thirdparty.isolate.
This solves Tor bug #9701, complying with the disk-avoidance requirement documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
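
As a rough sketch of the intent (not taken from the patch itself): the cache could
consult the pref before creating its backing file and fall back to memory-only
storage when isolation is requested. The helper name and the integer semantics of
privacy.thirdparty.isolate below are assumptions, not part of the original code.

    // Hypothetical helper, assuming privacy.thirdparty.isolate is an integer
    // pref where any non-zero value requests third-party isolation.
    // (mozilla/Preferences.h is already included by MediaCache.cpp.)
    static bool MediaCacheMustStayInMemory()
    {
      return mozilla::Preferences::GetInt("privacy.thirdparty.isolate", 0) != 0;
    }

MediaCache::Init() could then skip NS_OpenAnonymousTemporaryFile() and keep blocks
in an in-memory buffer whenever this returns true.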
michael@0 | 1 | /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
michael@0 | 2 | /* vim:set ts=2 sw=2 sts=2 et cindent: */ |
michael@0 | 3 | /* This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "mozilla/ReentrantMonitor.h" |
michael@0 | 8 | |
michael@0 | 9 | #include "MediaCache.h" |
michael@0 | 10 | #include "prio.h" |
michael@0 | 11 | #include "nsContentUtils.h" |
michael@0 | 12 | #include "nsThreadUtils.h" |
michael@0 | 13 | #include "MediaResource.h" |
michael@0 | 14 | #include "prlog.h" |
michael@0 | 15 | #include "mozilla/Preferences.h" |
michael@0 | 16 | #include "FileBlockCache.h" |
michael@0 | 17 | #include "nsAnonymousTemporaryFile.h" |
michael@0 | 18 | #include "nsIObserverService.h" |
michael@0 | 19 | #include "nsISeekableStream.h" |
michael@0 | 20 | #include "nsIPrincipal.h" |
michael@0 | 21 | #include "mozilla/Attributes.h" |
michael@0 | 22 | #include "mozilla/Services.h" |
michael@0 | 23 | #include <algorithm> |
michael@0 | 24 | |
michael@0 | 25 | namespace mozilla { |
michael@0 | 26 | |
michael@0 | 27 | #ifdef PR_LOGGING |
michael@0 | 28 | PRLogModuleInfo* gMediaCacheLog; |
michael@0 | 29 | #define CACHE_LOG(type, msg) PR_LOG(gMediaCacheLog, type, msg) |
michael@0 | 30 | #else |
michael@0 | 31 | #define CACHE_LOG(type, msg) |
michael@0 | 32 | #endif |
michael@0 | 33 | |
michael@0 | 34 | // Readahead blocks for non-seekable streams will be limited to this |
michael@0 | 35 | // fraction of the cache space. We don't normally evict such blocks |
michael@0 | 36 | // because replacing them requires a seek, but we need to make sure |
michael@0 | 37 | // they don't monopolize the cache. |
michael@0 | 38 | static const double NONSEEKABLE_READAHEAD_MAX = 0.5; |
michael@0 | 39 | |
michael@0 | 40 | // Data N seconds before the current playback position is given the same priority |
michael@0 | 41 | // as data REPLAY_PENALTY_FACTOR*N seconds ahead of the current playback |
michael@0 | 42 | // position. REPLAY_PENALTY_FACTOR is greater than 1 to reflect that |
michael@0 | 43 | // data in the past is less likely to be played again than data in the future. |
michael@0 | 44 | // We want to give data just behind the current playback position reasonably |
michael@0 | 45 | // high priority in case codecs need to retrieve that data (e.g. because |
michael@0 | 46 | // tracks haven't been muxed well or are being decoded at uneven rates). |
michael@0 | 47 | // 1/REPLAY_PENALTY_FACTOR as much data will be kept behind the |
michael@0 | 48 | // current playback position as will be kept ahead of the current playback |
michael@0 | 49 | // position. |
michael@0 | 50 | static const uint32_t REPLAY_PENALTY_FACTOR = 3; |
michael@0 | 51 | |
michael@0 | 52 | // When looking for a reusable block, scan forward this many blocks |
michael@0 | 53 | // from the desired "best" block location to look for free blocks, |
michael@0 | 54 | // before we resort to scanning the whole cache. The idea is to try to |
michael@0 | 55 | // store runs of stream blocks close-to-consecutively in the cache if we |
michael@0 | 56 | // can. |
michael@0 | 57 | static const uint32_t FREE_BLOCK_SCAN_LIMIT = 16; |
michael@0 | 58 | |
michael@0 | 59 | // Try to save power by not resuming paused reads if the stream won't need new |
michael@0 | 60 | // data within this time interval in the future |
michael@0 | 61 | static const uint32_t CACHE_POWERSAVE_WAKEUP_LOW_THRESHOLD_MS = 10000; |
michael@0 | 62 | |
michael@0 | 63 | #ifdef DEBUG |
michael@0 | 64 | // Turn this on to do very expensive cache state validation |
michael@0 | 65 | // #define DEBUG_VERIFY_CACHE |
michael@0 | 66 | #endif |
michael@0 | 67 | |
michael@0 | 68 | // There is at most one media cache (although that could quite easily be |
michael@0 | 69 | // relaxed if we wanted to manage multiple caches with independent |
michael@0 | 70 | // size limits). |
michael@0 | 71 | static MediaCache* gMediaCache; |
michael@0 | 72 | |
michael@0 | 73 | class MediaCacheFlusher MOZ_FINAL : public nsIObserver, |
michael@0 | 74 | public nsSupportsWeakReference { |
michael@0 | 75 | MediaCacheFlusher() {} |
michael@0 | 76 | ~MediaCacheFlusher(); |
michael@0 | 77 | public: |
michael@0 | 78 | NS_DECL_ISUPPORTS |
michael@0 | 79 | NS_DECL_NSIOBSERVER |
michael@0 | 80 | |
michael@0 | 81 | static void Init(); |
michael@0 | 82 | }; |
michael@0 | 83 | |
michael@0 | 84 | static MediaCacheFlusher* gMediaCacheFlusher; |
michael@0 | 85 | |
michael@0 | 86 | NS_IMPL_ISUPPORTS(MediaCacheFlusher, nsIObserver, nsISupportsWeakReference) |
michael@0 | 87 | |
michael@0 | 88 | MediaCacheFlusher::~MediaCacheFlusher() |
michael@0 | 89 | { |
michael@0 | 90 | gMediaCacheFlusher = nullptr; |
michael@0 | 91 | } |
michael@0 | 92 | |
michael@0 | 93 | void MediaCacheFlusher::Init() |
michael@0 | 94 | { |
michael@0 | 95 | if (gMediaCacheFlusher) { |
michael@0 | 96 | return; |
michael@0 | 97 | } |
michael@0 | 98 | |
michael@0 | 99 | gMediaCacheFlusher = new MediaCacheFlusher(); |
michael@0 | 100 | NS_ADDREF(gMediaCacheFlusher); |
michael@0 | 101 | |
michael@0 | 102 | nsCOMPtr<nsIObserverService> observerService = |
michael@0 | 103 | mozilla::services::GetObserverService(); |
michael@0 | 104 | if (observerService) { |
michael@0 | 105 | observerService->AddObserver(gMediaCacheFlusher, "last-pb-context-exited", true); |
michael@0 | 106 | observerService->AddObserver(gMediaCacheFlusher, "network-clear-cache-stored-anywhere", true); |
michael@0 | 107 | } |
michael@0 | 108 | } |
michael@0 | 109 | |
michael@0 | 110 | class MediaCache { |
michael@0 | 111 | public: |
michael@0 | 112 | friend class MediaCacheStream::BlockList; |
michael@0 | 113 | typedef MediaCacheStream::BlockList BlockList; |
michael@0 | 114 | enum { |
michael@0 | 115 | BLOCK_SIZE = MediaCacheStream::BLOCK_SIZE |
michael@0 | 116 | }; |
michael@0 | 117 | |
michael@0 | 118 | MediaCache() : mNextResourceID(1), |
michael@0 | 119 | mReentrantMonitor("MediaCache.mReentrantMonitor"), |
michael@0 | 120 | mUpdateQueued(false) |
michael@0 | 121 | #ifdef DEBUG |
michael@0 | 122 | , mInUpdate(false) |
michael@0 | 123 | #endif |
michael@0 | 124 | { |
michael@0 | 125 | MOZ_COUNT_CTOR(MediaCache); |
michael@0 | 126 | } |
michael@0 | 127 | ~MediaCache() { |
michael@0 | 128 | NS_ASSERTION(mStreams.IsEmpty(), "Stream(s) still open!"); |
michael@0 | 129 | Truncate(); |
michael@0 | 130 | NS_ASSERTION(mIndex.Length() == 0, "Blocks leaked?"); |
michael@0 | 131 | if (mFileCache) { |
michael@0 | 132 | mFileCache->Close(); |
michael@0 | 133 | mFileCache = nullptr; |
michael@0 | 134 | } |
michael@0 | 135 | MOZ_COUNT_DTOR(MediaCache); |
michael@0 | 136 | } |
michael@0 | 137 | |
michael@0 | 138 | // Main thread only. Creates the backing cache file. If this fails, |
michael@0 | 139 | // then the cache is still in a semi-valid state; mFileCache will be null |
michael@0 | 140 | // or unusable, so all I/O on the cache file will fail. |
michael@0 | 141 | nsresult Init(); |
michael@0 | 142 | // Shut down the global cache if it's no longer needed. We shut down |
michael@0 | 143 | // the cache as soon as there are no streams. This means that during |
michael@0 | 144 | // normal operation we are likely to start up the cache and shut it down |
michael@0 | 145 | // many times, but that's OK since starting it up is cheap and |
michael@0 | 146 | // shutting it down cleans things up and releases disk space. |
michael@0 | 147 | static void MaybeShutdown(); |
michael@0 | 148 | |
michael@0 | 149 | // Brutally flush the cache contents. Main thread only. |
michael@0 | 150 | static void Flush(); |
michael@0 | 151 | void FlushInternal(); |
michael@0 | 152 | |
michael@0 | 153 | // Cache-file access methods. These are the lowest-level cache methods. |
michael@0 | 154 | // mReentrantMonitor must be held; these can be called on any thread. |
michael@0 | 155 | // This can return partial reads. |
michael@0 | 156 | nsresult ReadCacheFile(int64_t aOffset, void* aData, int32_t aLength, |
michael@0 | 157 | int32_t* aBytes); |
michael@0 | 158 | // This will fail unless all aLength bytes are read |
michael@0 | 159 | nsresult ReadCacheFileAllBytes(int64_t aOffset, void* aData, int32_t aLength); |
michael@0 | 160 | |
michael@0 | 161 | int64_t AllocateResourceID() |
michael@0 | 162 | { |
michael@0 | 163 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 164 | return mNextResourceID++; |
michael@0 | 165 | } |
michael@0 | 166 | |
michael@0 | 167 | // mReentrantMonitor must be held, called on main thread. |
michael@0 | 168 | // These methods are used by the stream to set up and tear down streams, |
michael@0 | 169 | // and to handle reads and writes. |
michael@0 | 170 | // Add aStream to the list of streams. |
michael@0 | 171 | void OpenStream(MediaCacheStream* aStream); |
michael@0 | 172 | // Remove aStream from the list of streams. |
michael@0 | 173 | void ReleaseStream(MediaCacheStream* aStream); |
michael@0 | 174 | // Free all blocks belonging to aStream. |
michael@0 | 175 | void ReleaseStreamBlocks(MediaCacheStream* aStream); |
michael@0 | 176 | // Find a cache entry for this data, and write the data into it |
michael@0 | 177 | void AllocateAndWriteBlock(MediaCacheStream* aStream, const void* aData, |
michael@0 | 178 | MediaCacheStream::ReadMode aMode); |
michael@0 | 179 | |
michael@0 | 180 | // mReentrantMonitor must be held; can be called on any thread |
michael@0 | 181 | // Notify the cache that a seek has been requested. Some blocks may |
michael@0 | 182 | // need to change their class between PLAYED_BLOCK and READAHEAD_BLOCK. |
michael@0 | 183 | // This does not trigger channel seeks directly, the next Update() |
michael@0 | 184 | // will do that if necessary. The caller will call QueueUpdate(). |
michael@0 | 185 | void NoteSeek(MediaCacheStream* aStream, int64_t aOldOffset); |
michael@0 | 186 | // Notify the cache that a block has been read from. This is used |
michael@0 | 187 | // to update last-use times. The block may not actually have a |
michael@0 | 188 | // cache entry yet since Read can read data from a stream's |
michael@0 | 189 | // in-memory mPartialBlockBuffer while the block is only partly full, |
michael@0 | 190 | // and thus hasn't yet been committed to the cache. The caller will |
michael@0 | 191 | // call QueueUpdate(). |
michael@0 | 192 | void NoteBlockUsage(MediaCacheStream* aStream, int32_t aBlockIndex, |
michael@0 | 193 | MediaCacheStream::ReadMode aMode, TimeStamp aNow); |
michael@0 | 194 | // Mark aStream as having the block, adding it as an owner. |
michael@0 | 195 | void AddBlockOwnerAsReadahead(int32_t aBlockIndex, MediaCacheStream* aStream, |
michael@0 | 196 | int32_t aStreamBlockIndex); |
michael@0 | 197 | |
michael@0 | 198 | // This queues a call to Update() on the main thread. |
michael@0 | 199 | void QueueUpdate(); |
michael@0 | 200 | |
michael@0 | 201 | // Updates the cache state asynchronously on the main thread: |
michael@0 | 202 | // -- try to trim the cache back to its desired size, if necessary |
michael@0 | 203 | // -- suspend channels that are going to read data that's lower priority |
michael@0 | 204 | // than anything currently cached |
michael@0 | 205 | // -- resume channels that are going to read data that's higher priority |
michael@0 | 206 | // than something currently cached |
michael@0 | 207 | // -- seek channels that need to seek to a new location |
michael@0 | 208 | void Update(); |
michael@0 | 209 | |
michael@0 | 210 | #ifdef DEBUG_VERIFY_CACHE |
michael@0 | 211 | // Verify invariants, especially block list invariants |
michael@0 | 212 | void Verify(); |
michael@0 | 213 | #else |
michael@0 | 214 | void Verify() {} |
michael@0 | 215 | #endif |
michael@0 | 216 | |
michael@0 | 217 | ReentrantMonitor& GetReentrantMonitor() { return mReentrantMonitor; } |
michael@0 | 218 | |
michael@0 | 219 | /** |
michael@0 | 220 | * An iterator that makes it easy to iterate through all streams that |
michael@0 | 221 | * have a given resource ID and are not closed. |
michael@0 | 222 | * Can be used on the main thread or while holding the media cache lock. |
michael@0 | 223 | */ |
michael@0 | 224 | class ResourceStreamIterator { |
michael@0 | 225 | public: |
michael@0 | 226 | ResourceStreamIterator(int64_t aResourceID) : |
michael@0 | 227 | mResourceID(aResourceID), mNext(0) {} |
michael@0 | 228 | MediaCacheStream* Next() |
michael@0 | 229 | { |
michael@0 | 230 | while (mNext < gMediaCache->mStreams.Length()) { |
michael@0 | 231 | MediaCacheStream* stream = gMediaCache->mStreams[mNext]; |
michael@0 | 232 | ++mNext; |
michael@0 | 233 | if (stream->GetResourceID() == mResourceID && !stream->IsClosed()) |
michael@0 | 234 | return stream; |
michael@0 | 235 | } |
michael@0 | 236 | return nullptr; |
michael@0 | 237 | } |
michael@0 | 238 | private: |
michael@0 | 239 | int64_t mResourceID; |
michael@0 | 240 | uint32_t mNext; |
michael@0 | 241 | }; |
michael@0 | 242 | |
michael@0 | 243 | protected: |
michael@0 | 244 | // Find a free or reusable block and return its index. If there are no |
michael@0 | 245 | // free blocks and no reusable blocks, add a new block to the cache |
michael@0 | 246 | // and return it. Can return -1 on OOM. |
michael@0 | 247 | int32_t FindBlockForIncomingData(TimeStamp aNow, MediaCacheStream* aStream); |
michael@0 | 248 | // Find a reusable block --- a free block, if there is one, otherwise |
michael@0 | 249 | // the reusable block with the latest predicted-next-use, or -1 if |
michael@0 | 250 | // there aren't any freeable blocks. Only block indices less than |
michael@0 | 251 | // aMaxSearchBlockIndex are considered. If aForStream is non-null, |
michael@0 | 252 | // then aForStream and aForStreamBlock indicate what media data will |
michael@0 | 253 | // be placed; FindReusableBlock will favour returning free blocks |
michael@0 | 254 | // near other blocks for that point in the stream. |
michael@0 | 255 | int32_t FindReusableBlock(TimeStamp aNow, |
michael@0 | 256 | MediaCacheStream* aForStream, |
michael@0 | 257 | int32_t aForStreamBlock, |
michael@0 | 258 | int32_t aMaxSearchBlockIndex); |
michael@0 | 259 | bool BlockIsReusable(int32_t aBlockIndex); |
michael@0 | 260 | // Given a list of blocks sorted with the most reusable blocks at the |
michael@0 | 261 | // end, find the last block whose stream is not pinned (if any) |
michael@0 | 262 | // and whose cache entry index is less than aBlockIndexLimit |
michael@0 | 263 | // and append it to aResult. |
michael@0 | 264 | void AppendMostReusableBlock(BlockList* aBlockList, |
michael@0 | 265 | nsTArray<uint32_t>* aResult, |
michael@0 | 266 | int32_t aBlockIndexLimit); |
michael@0 | 267 | |
michael@0 | 268 | enum BlockClass { |
michael@0 | 269 | // block belongs to mMetadataBlockList because data has been consumed |
michael@0 | 270 | // from it in "metadata mode" --- in particular blocks read during |
michael@0 | 271 | // Ogg seeks go into this class. These blocks may have played data |
michael@0 | 272 | // in them too. |
michael@0 | 273 | METADATA_BLOCK, |
michael@0 | 274 | // block belongs to mPlayedBlockList because its offset is |
michael@0 | 275 | // less than the stream's current reader position |
michael@0 | 276 | PLAYED_BLOCK, |
michael@0 | 277 | // block belongs to the stream's mReadaheadBlockList because its |
michael@0 | 278 | // offset is greater than or equal to the stream's current |
michael@0 | 279 | // reader position |
michael@0 | 280 | READAHEAD_BLOCK |
michael@0 | 281 | }; |
michael@0 | 282 | |
michael@0 | 283 | struct BlockOwner { |
michael@0 | 284 | BlockOwner() : mStream(nullptr), mClass(READAHEAD_BLOCK) {} |
michael@0 | 285 | |
michael@0 | 286 | // The stream that owns this block, or null if the block is free. |
michael@0 | 287 | MediaCacheStream* mStream; |
michael@0 | 288 | // The block index in the stream. Valid only if mStream is non-null. |
michael@0 | 289 | uint32_t mStreamBlock; |
michael@0 | 290 | // Time at which this block was last used. Valid only if |
michael@0 | 291 | // mClass is METADATA_BLOCK or PLAYED_BLOCK. |
michael@0 | 292 | TimeStamp mLastUseTime; |
michael@0 | 293 | BlockClass mClass; |
michael@0 | 294 | }; |
michael@0 | 295 | |
michael@0 | 296 | struct Block { |
michael@0 | 297 | // Free blocks have an empty mOwners array |
michael@0 | 298 | nsTArray<BlockOwner> mOwners; |
michael@0 | 299 | }; |
michael@0 | 300 | |
michael@0 | 301 | // Get the BlockList that the block should belong to given its |
michael@0 | 302 | // current owner |
michael@0 | 303 | BlockList* GetListForBlock(BlockOwner* aBlock); |
michael@0 | 304 | // Get the BlockOwner for the given block index and owning stream |
michael@0 | 305 | // (returns null if the stream does not own the block) |
michael@0 | 306 | BlockOwner* GetBlockOwner(int32_t aBlockIndex, MediaCacheStream* aStream); |
michael@0 | 307 | // Returns true iff the block is free |
michael@0 | 308 | bool IsBlockFree(int32_t aBlockIndex) |
michael@0 | 309 | { return mIndex[aBlockIndex].mOwners.IsEmpty(); } |
michael@0 | 310 | // Add the block to the free list and mark its streams as not having |
michael@0 | 311 | // the block in cache |
michael@0 | 312 | void FreeBlock(int32_t aBlock); |
michael@0 | 313 | // Mark aStream as not having the block, removing it as an owner. If |
michael@0 | 314 | // the block has no more owners it's added to the free list. |
michael@0 | 315 | void RemoveBlockOwner(int32_t aBlockIndex, MediaCacheStream* aStream); |
michael@0 | 316 | // Swap all metadata associated with the two blocks. The caller |
michael@0 | 317 | // is responsible for swapping the corresponding cache file state. |
michael@0 | 318 | void SwapBlocks(int32_t aBlockIndex1, int32_t aBlockIndex2); |
michael@0 | 319 | // Insert the block into the readahead block list for the stream |
michael@0 | 320 | // at the right point in the list. |
michael@0 | 321 | void InsertReadaheadBlock(BlockOwner* aBlockOwner, int32_t aBlockIndex); |
michael@0 | 322 | |
michael@0 | 323 | // Guess the duration until block aBlock will be next used |
michael@0 | 324 | TimeDuration PredictNextUse(TimeStamp aNow, int32_t aBlock); |
michael@0 | 325 | // Guess the duration until the next incoming data on aStream will be used |
michael@0 | 326 | TimeDuration PredictNextUseForIncomingData(MediaCacheStream* aStream); |
michael@0 | 327 | |
michael@0 | 328 | // Truncate the file and index array if there are free blocks at the |
michael@0 | 329 | // end |
michael@0 | 330 | void Truncate(); |
michael@0 | 331 | |
michael@0 | 332 | // This member is main-thread only. It's used to allocate unique |
michael@0 | 333 | // resource IDs to streams. |
michael@0 | 334 | int64_t mNextResourceID; |
michael@0 | 335 | |
michael@0 | 336 | // The monitor protects all the data members here. Also, off-main-thread |
michael@0 | 337 | // readers that need to block will Wait() on this monitor. When new |
michael@0 | 338 | // data becomes available in the cache, we NotifyAll() on this monitor. |
michael@0 | 339 | ReentrantMonitor mReentrantMonitor; |
michael@0 | 340 | // This is only written while on the main thread and the monitor is held. |
michael@0 | 341 | // Thus, it can be safely read from the main thread or while holding the monitor. |
michael@0 | 342 | nsTArray<MediaCacheStream*> mStreams; |
michael@0 | 343 | // The Blocks describing the cache entries. |
michael@0 | 344 | nsTArray<Block> mIndex; |
michael@0 | 345 | // Writer which performs IO, asynchronously writing cache blocks. |
michael@0 | 346 | nsRefPtr<FileBlockCache> mFileCache; |
michael@0 | 347 | // The list of free blocks; they are not ordered. |
michael@0 | 348 | BlockList mFreeBlocks; |
michael@0 | 349 | // True if an event to run Update() has been queued but not processed |
michael@0 | 350 | bool mUpdateQueued; |
michael@0 | 351 | #ifdef DEBUG |
michael@0 | 352 | bool mInUpdate; |
michael@0 | 353 | #endif |
michael@0 | 354 | }; |
michael@0 | 355 | |
michael@0 | 356 | NS_IMETHODIMP |
michael@0 | 357 | MediaCacheFlusher::Observe(nsISupports *aSubject, char const *aTopic, char16_t const *aData) |
michael@0 | 358 | { |
michael@0 | 359 | if (strcmp(aTopic, "last-pb-context-exited") == 0) { |
michael@0 | 360 | MediaCache::Flush(); |
michael@0 | 361 | } |
michael@0 | 362 | if (strcmp(aTopic, "network-clear-cache-stored-anywhere") == 0) { |
michael@0 | 363 | MediaCache::Flush(); |
michael@0 | 364 | } |
michael@0 | 365 | return NS_OK; |
michael@0 | 366 | } |
michael@0 | 367 | |
michael@0 | 368 | MediaCacheStream::MediaCacheStream(ChannelMediaResource* aClient) |
michael@0 | 369 | : mClient(aClient), |
michael@0 | 370 | mInitialized(false), |
michael@0 | 371 | mHasHadUpdate(false), |
michael@0 | 372 | mClosed(false), |
michael@0 | 373 | mDidNotifyDataEnded(false), |
michael@0 | 374 | mResourceID(0), |
michael@0 | 375 | mIsTransportSeekable(false), |
michael@0 | 376 | mCacheSuspended(false), |
michael@0 | 377 | mChannelEnded(false), |
michael@0 | 378 | mChannelOffset(0), |
michael@0 | 379 | mStreamLength(-1), |
michael@0 | 380 | mStreamOffset(0), |
michael@0 | 381 | mPlaybackBytesPerSecond(10000), |
michael@0 | 382 | mPinCount(0), |
michael@0 | 383 | mCurrentMode(MODE_PLAYBACK), |
michael@0 | 384 | mMetadataInPartialBlockBuffer(false), |
michael@0 | 385 | mPartialBlockBuffer(new int64_t[BLOCK_SIZE/sizeof(int64_t)]) |
michael@0 | 386 | { |
michael@0 | 387 | } |
michael@0 | 388 | |
michael@0 | 389 | size_t MediaCacheStream::SizeOfExcludingThis( |
michael@0 | 390 | MallocSizeOf aMallocSizeOf) const |
michael@0 | 391 | { |
michael@0 | 392 | // Looks like these are not owned: |
michael@0 | 393 | // - mClient |
michael@0 | 394 | // - mPrincipal |
michael@0 | 395 | size_t size = mBlocks.SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 396 | size += mReadaheadBlocks.SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 397 | size += mMetadataBlocks.SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 398 | size += mPlayedBlocks.SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 399 | size += mPartialBlockBuffer.SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 400 | |
michael@0 | 401 | return size; |
michael@0 | 402 | } |
michael@0 | 403 | |
michael@0 | 404 | size_t MediaCacheStream::BlockList::SizeOfExcludingThis( |
michael@0 | 405 | MallocSizeOf aMallocSizeOf) const |
michael@0 | 406 | { |
michael@0 | 407 | return mEntries.SizeOfExcludingThis(/* sizeOfEntryExcludingThis = */ nullptr, |
michael@0 | 408 | aMallocSizeOf); |
michael@0 | 409 | } |
michael@0 | 410 | |
michael@0 | 411 | void MediaCacheStream::BlockList::AddFirstBlock(int32_t aBlock) |
michael@0 | 412 | { |
michael@0 | 413 | NS_ASSERTION(!mEntries.GetEntry(aBlock), "Block already in list"); |
michael@0 | 414 | Entry* entry = mEntries.PutEntry(aBlock); |
michael@0 | 415 | |
michael@0 | 416 | if (mFirstBlock < 0) { |
michael@0 | 417 | entry->mNextBlock = entry->mPrevBlock = aBlock; |
michael@0 | 418 | } else { |
michael@0 | 419 | entry->mNextBlock = mFirstBlock; |
michael@0 | 420 | entry->mPrevBlock = mEntries.GetEntry(mFirstBlock)->mPrevBlock; |
michael@0 | 421 | mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = aBlock; |
michael@0 | 422 | mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = aBlock; |
michael@0 | 423 | } |
michael@0 | 424 | mFirstBlock = aBlock; |
michael@0 | 425 | ++mCount; |
michael@0 | 426 | } |
michael@0 | 427 | |
michael@0 | 428 | void MediaCacheStream::BlockList::AddAfter(int32_t aBlock, int32_t aBefore) |
michael@0 | 429 | { |
michael@0 | 430 | NS_ASSERTION(!mEntries.GetEntry(aBlock), "Block already in list"); |
michael@0 | 431 | Entry* entry = mEntries.PutEntry(aBlock); |
michael@0 | 432 | |
michael@0 | 433 | Entry* addAfter = mEntries.GetEntry(aBefore); |
michael@0 | 434 | NS_ASSERTION(addAfter, "aBefore not in list"); |
michael@0 | 435 | |
michael@0 | 436 | entry->mNextBlock = addAfter->mNextBlock; |
michael@0 | 437 | entry->mPrevBlock = aBefore; |
michael@0 | 438 | mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = aBlock; |
michael@0 | 439 | mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = aBlock; |
michael@0 | 440 | ++mCount; |
michael@0 | 441 | } |
michael@0 | 442 | |
michael@0 | 443 | void MediaCacheStream::BlockList::RemoveBlock(int32_t aBlock) |
michael@0 | 444 | { |
michael@0 | 445 | Entry* entry = mEntries.GetEntry(aBlock); |
michael@0 | 446 | NS_ASSERTION(entry, "Block not in list"); |
michael@0 | 447 | |
michael@0 | 448 | if (entry->mNextBlock == aBlock) { |
michael@0 | 449 | NS_ASSERTION(entry->mPrevBlock == aBlock, "Linked list inconsistency"); |
michael@0 | 450 | NS_ASSERTION(mFirstBlock == aBlock, "Linked list inconsistency"); |
michael@0 | 451 | mFirstBlock = -1; |
michael@0 | 452 | } else { |
michael@0 | 453 | if (mFirstBlock == aBlock) { |
michael@0 | 454 | mFirstBlock = entry->mNextBlock; |
michael@0 | 455 | } |
michael@0 | 456 | mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = entry->mPrevBlock; |
michael@0 | 457 | mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = entry->mNextBlock; |
michael@0 | 458 | } |
michael@0 | 459 | mEntries.RemoveEntry(aBlock); |
michael@0 | 460 | --mCount; |
michael@0 | 461 | } |
michael@0 | 462 | |
michael@0 | 463 | int32_t MediaCacheStream::BlockList::GetLastBlock() const |
michael@0 | 464 | { |
michael@0 | 465 | if (mFirstBlock < 0) |
michael@0 | 466 | return -1; |
michael@0 | 467 | return mEntries.GetEntry(mFirstBlock)->mPrevBlock; |
michael@0 | 468 | } |
michael@0 | 469 | |
michael@0 | 470 | int32_t MediaCacheStream::BlockList::GetNextBlock(int32_t aBlock) const |
michael@0 | 471 | { |
michael@0 | 472 | int32_t block = mEntries.GetEntry(aBlock)->mNextBlock; |
michael@0 | 473 | if (block == mFirstBlock) |
michael@0 | 474 | return -1; |
michael@0 | 475 | return block; |
michael@0 | 476 | } |
michael@0 | 477 | |
michael@0 | 478 | int32_t MediaCacheStream::BlockList::GetPrevBlock(int32_t aBlock) const |
michael@0 | 479 | { |
michael@0 | 480 | if (aBlock == mFirstBlock) |
michael@0 | 481 | return -1; |
michael@0 | 482 | return mEntries.GetEntry(aBlock)->mPrevBlock; |
michael@0 | 483 | } |
michael@0 | 484 | |
michael@0 | 485 | #ifdef DEBUG |
michael@0 | 486 | void MediaCacheStream::BlockList::Verify() |
michael@0 | 487 | { |
michael@0 | 488 | int32_t count = 0; |
michael@0 | 489 | if (mFirstBlock >= 0) { |
michael@0 | 490 | int32_t block = mFirstBlock; |
michael@0 | 491 | do { |
michael@0 | 492 | Entry* entry = mEntries.GetEntry(block); |
michael@0 | 493 | NS_ASSERTION(mEntries.GetEntry(entry->mNextBlock)->mPrevBlock == block, |
michael@0 | 494 | "Bad prev link"); |
michael@0 | 495 | NS_ASSERTION(mEntries.GetEntry(entry->mPrevBlock)->mNextBlock == block, |
michael@0 | 496 | "Bad next link"); |
michael@0 | 497 | block = entry->mNextBlock; |
michael@0 | 498 | ++count; |
michael@0 | 499 | } while (block != mFirstBlock); |
michael@0 | 500 | } |
michael@0 | 501 | NS_ASSERTION(count == mCount, "Bad count"); |
michael@0 | 502 | } |
michael@0 | 503 | #endif |
michael@0 | 504 | |
michael@0 | 505 | static void UpdateSwappedBlockIndex(int32_t* aBlockIndex, |
michael@0 | 506 | int32_t aBlock1Index, int32_t aBlock2Index) |
michael@0 | 507 | { |
michael@0 | 508 | int32_t index = *aBlockIndex; |
michael@0 | 509 | if (index == aBlock1Index) { |
michael@0 | 510 | *aBlockIndex = aBlock2Index; |
michael@0 | 511 | } else if (index == aBlock2Index) { |
michael@0 | 512 | *aBlockIndex = aBlock1Index; |
michael@0 | 513 | } |
michael@0 | 514 | } |
michael@0 | 515 | |
michael@0 | 516 | void |
michael@0 | 517 | MediaCacheStream::BlockList::NotifyBlockSwapped(int32_t aBlockIndex1, |
michael@0 | 518 | int32_t aBlockIndex2) |
michael@0 | 519 | { |
michael@0 | 520 | Entry* e1 = mEntries.GetEntry(aBlockIndex1); |
michael@0 | 521 | Entry* e2 = mEntries.GetEntry(aBlockIndex2); |
michael@0 | 522 | int32_t e1Prev = -1, e1Next = -1, e2Prev = -1, e2Next = -1; |
michael@0 | 523 | |
michael@0 | 524 | // Fix mFirstBlock |
michael@0 | 525 | UpdateSwappedBlockIndex(&mFirstBlock, aBlockIndex1, aBlockIndex2); |
michael@0 | 526 | |
michael@0 | 527 | // Fix mNextBlock/mPrevBlock links. First capture previous/next links |
michael@0 | 528 | // so we don't get confused due to aliasing. |
michael@0 | 529 | if (e1) { |
michael@0 | 530 | e1Prev = e1->mPrevBlock; |
michael@0 | 531 | e1Next = e1->mNextBlock; |
michael@0 | 532 | } |
michael@0 | 533 | if (e2) { |
michael@0 | 534 | e2Prev = e2->mPrevBlock; |
michael@0 | 535 | e2Next = e2->mNextBlock; |
michael@0 | 536 | } |
michael@0 | 537 | // Update the entries. |
michael@0 | 538 | if (e1) { |
michael@0 | 539 | mEntries.GetEntry(e1Prev)->mNextBlock = aBlockIndex2; |
michael@0 | 540 | mEntries.GetEntry(e1Next)->mPrevBlock = aBlockIndex2; |
michael@0 | 541 | } |
michael@0 | 542 | if (e2) { |
michael@0 | 543 | mEntries.GetEntry(e2Prev)->mNextBlock = aBlockIndex1; |
michael@0 | 544 | mEntries.GetEntry(e2Next)->mPrevBlock = aBlockIndex1; |
michael@0 | 545 | } |
michael@0 | 546 | |
michael@0 | 547 | // Fix hashtable keys. First remove stale entries. |
michael@0 | 548 | if (e1) { |
michael@0 | 549 | e1Prev = e1->mPrevBlock; |
michael@0 | 550 | e1Next = e1->mNextBlock; |
michael@0 | 551 | mEntries.RemoveEntry(aBlockIndex1); |
michael@0 | 552 | // Refresh pointer after hashtable mutation. |
michael@0 | 553 | e2 = mEntries.GetEntry(aBlockIndex2); |
michael@0 | 554 | } |
michael@0 | 555 | if (e2) { |
michael@0 | 556 | e2Prev = e2->mPrevBlock; |
michael@0 | 557 | e2Next = e2->mNextBlock; |
michael@0 | 558 | mEntries.RemoveEntry(aBlockIndex2); |
michael@0 | 559 | } |
michael@0 | 560 | // Put new entries back. |
michael@0 | 561 | if (e1) { |
michael@0 | 562 | e1 = mEntries.PutEntry(aBlockIndex2); |
michael@0 | 563 | e1->mNextBlock = e1Next; |
michael@0 | 564 | e1->mPrevBlock = e1Prev; |
michael@0 | 565 | } |
michael@0 | 566 | if (e2) { |
michael@0 | 567 | e2 = mEntries.PutEntry(aBlockIndex1); |
michael@0 | 568 | e2->mNextBlock = e2Next; |
michael@0 | 569 | e2->mPrevBlock = e2Prev; |
michael@0 | 570 | } |
michael@0 | 571 | } |
michael@0 | 572 | |
michael@0 | 573 | nsresult |
michael@0 | 574 | MediaCache::Init() |
michael@0 | 575 | { |
michael@0 | 576 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 577 | NS_ASSERTION(!mFileCache, "Cache file already open?"); |
michael@0 | 578 | |
michael@0 | 579 | PRFileDesc* fileDesc = nullptr; |
michael@0 | 580 | nsresult rv = NS_OpenAnonymousTemporaryFile(&fileDesc); |
michael@0 | 581 | NS_ENSURE_SUCCESS(rv,rv); |
michael@0 | 582 | |
michael@0 | 583 | mFileCache = new FileBlockCache(); |
michael@0 | 584 | rv = mFileCache->Open(fileDesc); |
michael@0 | 585 | NS_ENSURE_SUCCESS(rv,rv); |
michael@0 | 586 | |
michael@0 | 587 | #ifdef PR_LOGGING |
michael@0 | 588 | if (!gMediaCacheLog) { |
michael@0 | 589 | gMediaCacheLog = PR_NewLogModule("MediaCache"); |
michael@0 | 590 | } |
michael@0 | 591 | #endif |
michael@0 | 592 | |
michael@0 | 593 | MediaCacheFlusher::Init(); |
michael@0 | 594 | |
michael@0 | 595 | return NS_OK; |
michael@0 | 596 | } |
michael@0 | 597 | |
michael@0 | 598 | void |
michael@0 | 599 | MediaCache::Flush() |
michael@0 | 600 | { |
michael@0 | 601 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 602 | |
michael@0 | 603 | if (!gMediaCache) |
michael@0 | 604 | return; |
michael@0 | 605 | |
michael@0 | 606 | gMediaCache->FlushInternal(); |
michael@0 | 607 | } |
michael@0 | 608 | |
michael@0 | 609 | void |
michael@0 | 610 | MediaCache::FlushInternal() |
michael@0 | 611 | { |
michael@0 | 612 | ReentrantMonitorAutoEnter mon(mReentrantMonitor); |
michael@0 | 613 | |
michael@0 | 614 | for (uint32_t blockIndex = 0; blockIndex < mIndex.Length(); ++blockIndex) { |
michael@0 | 615 | FreeBlock(blockIndex); |
michael@0 | 616 | } |
michael@0 | 617 | |
michael@0 | 618 | // Truncate file, close it, and reopen |
michael@0 | 619 | Truncate(); |
michael@0 | 620 | NS_ASSERTION(mIndex.Length() == 0, "Blocks leaked?"); |
michael@0 | 621 | if (mFileCache) { |
michael@0 | 622 | mFileCache->Close(); |
michael@0 | 623 | mFileCache = nullptr; |
michael@0 | 624 | } |
michael@0 | 625 | Init(); |
michael@0 | 626 | } |
michael@0 | 627 | |
michael@0 | 628 | void |
michael@0 | 629 | MediaCache::MaybeShutdown() |
michael@0 | 630 | { |
michael@0 | 631 | NS_ASSERTION(NS_IsMainThread(), |
michael@0 | 632 | "MediaCache::MaybeShutdown called on non-main thread"); |
michael@0 | 633 | if (!gMediaCache->mStreams.IsEmpty()) { |
michael@0 | 634 | // Don't shut down yet, streams are still alive |
michael@0 | 635 | return; |
michael@0 | 636 | } |
michael@0 | 637 | |
michael@0 | 638 | // Since we're on the main thread, no-one is going to add a new stream |
michael@0 | 639 | // while we shut down. |
michael@0 | 640 | // This function is static so we don't have to delete 'this'. |
michael@0 | 641 | delete gMediaCache; |
michael@0 | 642 | gMediaCache = nullptr; |
michael@0 | 643 | NS_IF_RELEASE(gMediaCacheFlusher); |
michael@0 | 644 | } |
michael@0 | 645 | |
michael@0 | 646 | static void |
michael@0 | 647 | InitMediaCache() |
michael@0 | 648 | { |
michael@0 | 649 | if (gMediaCache) |
michael@0 | 650 | return; |
michael@0 | 651 | |
michael@0 | 652 | gMediaCache = new MediaCache(); |
michael@0 | 653 | if (!gMediaCache) |
michael@0 | 654 | return; |
michael@0 | 655 | |
michael@0 | 656 | nsresult rv = gMediaCache->Init(); |
michael@0 | 657 | if (NS_FAILED(rv)) { |
michael@0 | 658 | delete gMediaCache; |
michael@0 | 659 | gMediaCache = nullptr; |
michael@0 | 660 | } |
michael@0 | 661 | } |
michael@0 | 662 | |
michael@0 | 663 | nsresult |
michael@0 | 664 | MediaCache::ReadCacheFile(int64_t aOffset, void* aData, int32_t aLength, |
michael@0 | 665 | int32_t* aBytes) |
michael@0 | 666 | { |
michael@0 | 667 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 668 | |
michael@0 | 669 | if (!mFileCache) |
michael@0 | 670 | return NS_ERROR_FAILURE; |
michael@0 | 671 | |
michael@0 | 672 | return mFileCache->Read(aOffset, reinterpret_cast<uint8_t*>(aData), aLength, aBytes); |
michael@0 | 673 | } |
michael@0 | 674 | |
michael@0 | 675 | nsresult |
michael@0 | 676 | MediaCache::ReadCacheFileAllBytes(int64_t aOffset, void* aData, int32_t aLength) |
michael@0 | 677 | { |
michael@0 | 678 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 679 | |
michael@0 | 680 | int64_t offset = aOffset; |
michael@0 | 681 | int32_t count = aLength; |
michael@0 | 682 | // Cast to char* so we can do byte-wise pointer arithmetic |
michael@0 | 683 | char* data = static_cast<char*>(aData); |
michael@0 | 684 | while (count > 0) { |
michael@0 | 685 | int32_t bytes; |
michael@0 | 686 | nsresult rv = ReadCacheFile(offset, data, count, &bytes); |
michael@0 | 687 | if (NS_FAILED(rv)) |
michael@0 | 688 | return rv; |
michael@0 | 689 | if (bytes == 0) |
michael@0 | 690 | return NS_ERROR_FAILURE; |
michael@0 | 691 | count -= bytes; |
michael@0 | 692 | data += bytes; |
michael@0 | 693 | offset += bytes; |
michael@0 | 694 | } |
michael@0 | 695 | return NS_OK; |
michael@0 | 696 | } |
michael@0 | 697 | |
michael@0 | 698 | static int32_t GetMaxBlocks() |
michael@0 | 699 | { |
michael@0 | 700 | // We look up the cache size every time. This means dynamic changes |
michael@0 | 701 | // to the pref are applied. |
michael@0 | 702 | // Cache size is in KB |
michael@0 | 703 | int32_t cacheSize = Preferences::GetInt("media.cache_size", 500*1024); |
michael@0 | 704 | int64_t maxBlocks = static_cast<int64_t>(cacheSize)*1024/MediaCache::BLOCK_SIZE; |
michael@0 | 705 | maxBlocks = std::max<int64_t>(maxBlocks, 1); |
michael@0 | 706 | return int32_t(std::min<int64_t>(maxBlocks, INT32_MAX)); |
michael@0 | 707 | } |
michael@0 | 708 | |
michael@0 | 709 | int32_t |
michael@0 | 710 | MediaCache::FindBlockForIncomingData(TimeStamp aNow, |
michael@0 | 711 | MediaCacheStream* aStream) |
michael@0 | 712 | { |
michael@0 | 713 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 714 | |
michael@0 | 715 | int32_t blockIndex = FindReusableBlock(aNow, aStream, |
michael@0 | 716 | aStream->mChannelOffset/BLOCK_SIZE, INT32_MAX); |
michael@0 | 717 | |
michael@0 | 718 | if (blockIndex < 0 || !IsBlockFree(blockIndex)) { |
michael@0 | 719 | // Either no block was found, or the block returned is already allocated. |
michael@0 | 720 | // Don't reuse it if a) there's room to expand the cache or |
michael@0 | 721 | // b) the data we're going to store in the reused block is not higher |
michael@0 | 722 | // priority than the data already stored in that block. |
michael@0 | 723 | // The latter can lead us to go over the cache limit a bit. |
michael@0 | 724 | if ((mIndex.Length() < uint32_t(GetMaxBlocks()) || blockIndex < 0 || |
michael@0 | 725 | PredictNextUseForIncomingData(aStream) >= PredictNextUse(aNow, blockIndex))) { |
michael@0 | 726 | blockIndex = mIndex.Length(); |
michael@0 | 727 | if (!mIndex.AppendElement()) |
michael@0 | 728 | return -1; |
michael@0 | 729 | mFreeBlocks.AddFirstBlock(blockIndex); |
michael@0 | 730 | return blockIndex; |
michael@0 | 731 | } |
michael@0 | 732 | } |
michael@0 | 733 | |
michael@0 | 734 | return blockIndex; |
michael@0 | 735 | } |
michael@0 | 736 | |
michael@0 | 737 | bool |
michael@0 | 738 | MediaCache::BlockIsReusable(int32_t aBlockIndex) |
michael@0 | 739 | { |
michael@0 | 740 | Block* block = &mIndex[aBlockIndex]; |
michael@0 | 741 | for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { |
michael@0 | 742 | MediaCacheStream* stream = block->mOwners[i].mStream; |
michael@0 | 743 | if (stream->mPinCount > 0 || |
michael@0 | 744 | stream->mStreamOffset/BLOCK_SIZE == block->mOwners[i].mStreamBlock) { |
michael@0 | 745 | return false; |
michael@0 | 746 | } |
michael@0 | 747 | } |
michael@0 | 748 | return true; |
michael@0 | 749 | } |
michael@0 | 750 | |
michael@0 | 751 | void |
michael@0 | 752 | MediaCache::AppendMostReusableBlock(BlockList* aBlockList, |
michael@0 | 753 | nsTArray<uint32_t>* aResult, |
michael@0 | 754 | int32_t aBlockIndexLimit) |
michael@0 | 755 | { |
michael@0 | 756 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 757 | |
michael@0 | 758 | int32_t blockIndex = aBlockList->GetLastBlock(); |
michael@0 | 759 | if (blockIndex < 0) |
michael@0 | 760 | return; |
michael@0 | 761 | do { |
michael@0 | 762 | // Don't consider blocks for pinned streams, or blocks that are |
michael@0 | 763 | // beyond the specified limit, or a block that contains a stream's |
michael@0 | 764 | // current read position (such a block contains both played data |
michael@0 | 765 | // and readahead data) |
michael@0 | 766 | if (blockIndex < aBlockIndexLimit && BlockIsReusable(blockIndex)) { |
michael@0 | 767 | aResult->AppendElement(blockIndex); |
michael@0 | 768 | return; |
michael@0 | 769 | } |
michael@0 | 770 | blockIndex = aBlockList->GetPrevBlock(blockIndex); |
michael@0 | 771 | } while (blockIndex >= 0); |
michael@0 | 772 | } |
michael@0 | 773 | |
michael@0 | 774 | int32_t |
michael@0 | 775 | MediaCache::FindReusableBlock(TimeStamp aNow, |
michael@0 | 776 | MediaCacheStream* aForStream, |
michael@0 | 777 | int32_t aForStreamBlock, |
michael@0 | 778 | int32_t aMaxSearchBlockIndex) |
michael@0 | 779 | { |
michael@0 | 780 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 781 | |
michael@0 | 782 | uint32_t length = std::min(uint32_t(aMaxSearchBlockIndex), mIndex.Length()); |
michael@0 | 783 | |
michael@0 | 784 | if (aForStream && aForStreamBlock > 0 && |
michael@0 | 785 | uint32_t(aForStreamBlock) <= aForStream->mBlocks.Length()) { |
michael@0 | 786 | int32_t prevCacheBlock = aForStream->mBlocks[aForStreamBlock - 1]; |
michael@0 | 787 | if (prevCacheBlock >= 0) { |
michael@0 | 788 | uint32_t freeBlockScanEnd = |
michael@0 | 789 | std::min(length, prevCacheBlock + FREE_BLOCK_SCAN_LIMIT); |
michael@0 | 790 | for (uint32_t i = prevCacheBlock; i < freeBlockScanEnd; ++i) { |
michael@0 | 791 | if (IsBlockFree(i)) |
michael@0 | 792 | return i; |
michael@0 | 793 | } |
michael@0 | 794 | } |
michael@0 | 795 | } |
michael@0 | 796 | |
michael@0 | 797 | if (!mFreeBlocks.IsEmpty()) { |
michael@0 | 798 | int32_t blockIndex = mFreeBlocks.GetFirstBlock(); |
michael@0 | 799 | do { |
michael@0 | 800 | if (blockIndex < aMaxSearchBlockIndex) |
michael@0 | 801 | return blockIndex; |
michael@0 | 802 | blockIndex = mFreeBlocks.GetNextBlock(blockIndex); |
michael@0 | 803 | } while (blockIndex >= 0); |
michael@0 | 804 | } |
michael@0 | 805 | |
michael@0 | 806 | // Build a list of the blocks we should consider for the "latest |
michael@0 | 807 | // predicted time of next use". We can exploit the fact that the block |
michael@0 | 808 | // linked lists are ordered by increasing time of next use. This is |
michael@0 | 809 | // actually the whole point of having the linked lists. |
michael@0 | 810 | nsAutoTArray<uint32_t,8> candidates; |
michael@0 | 811 | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
michael@0 | 812 | MediaCacheStream* stream = mStreams[i]; |
michael@0 | 813 | if (stream->mPinCount > 0) { |
michael@0 | 814 | // No point in even looking at this stream's blocks |
michael@0 | 815 | continue; |
michael@0 | 816 | } |
michael@0 | 817 | |
michael@0 | 818 | AppendMostReusableBlock(&stream->mMetadataBlocks, &candidates, length); |
michael@0 | 819 | AppendMostReusableBlock(&stream->mPlayedBlocks, &candidates, length); |
michael@0 | 820 | |
michael@0 | 821 | // Don't consider readahead blocks in non-seekable streams. If we |
michael@0 | 822 | // remove the block we won't be able to seek back to read it later. |
michael@0 | 823 | if (stream->mIsTransportSeekable) { |
michael@0 | 824 | AppendMostReusableBlock(&stream->mReadaheadBlocks, &candidates, length); |
michael@0 | 825 | } |
michael@0 | 826 | } |
michael@0 | 827 | |
michael@0 | 828 | TimeDuration latestUse; |
michael@0 | 829 | int32_t latestUseBlock = -1; |
michael@0 | 830 | for (uint32_t i = 0; i < candidates.Length(); ++i) { |
michael@0 | 831 | TimeDuration nextUse = PredictNextUse(aNow, candidates[i]); |
michael@0 | 832 | if (nextUse > latestUse) { |
michael@0 | 833 | latestUse = nextUse; |
michael@0 | 834 | latestUseBlock = candidates[i]; |
michael@0 | 835 | } |
michael@0 | 836 | } |
michael@0 | 837 | |
michael@0 | 838 | return latestUseBlock; |
michael@0 | 839 | } |
michael@0 | 840 | |
michael@0 | 841 | MediaCache::BlockList* |
michael@0 | 842 | MediaCache::GetListForBlock(BlockOwner* aBlock) |
michael@0 | 843 | { |
michael@0 | 844 | switch (aBlock->mClass) { |
michael@0 | 845 | case METADATA_BLOCK: |
michael@0 | 846 | NS_ASSERTION(aBlock->mStream, "Metadata block has no stream?"); |
michael@0 | 847 | return &aBlock->mStream->mMetadataBlocks; |
michael@0 | 848 | case PLAYED_BLOCK: |
michael@0 | 849 | NS_ASSERTION(aBlock->mStream, "Played block has no stream?"); |
michael@0 | 850 | return &aBlock->mStream->mPlayedBlocks; |
michael@0 | 851 | case READAHEAD_BLOCK: |
michael@0 | 852 | NS_ASSERTION(aBlock->mStream, "Readahead block has no stream?"); |
michael@0 | 853 | return &aBlock->mStream->mReadaheadBlocks; |
michael@0 | 854 | default: |
michael@0 | 855 | NS_ERROR("Invalid block class"); |
michael@0 | 856 | return nullptr; |
michael@0 | 857 | } |
michael@0 | 858 | } |
michael@0 | 859 | |
michael@0 | 860 | MediaCache::BlockOwner* |
michael@0 | 861 | MediaCache::GetBlockOwner(int32_t aBlockIndex, MediaCacheStream* aStream) |
michael@0 | 862 | { |
michael@0 | 863 | Block* block = &mIndex[aBlockIndex]; |
michael@0 | 864 | for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { |
michael@0 | 865 | if (block->mOwners[i].mStream == aStream) |
michael@0 | 866 | return &block->mOwners[i]; |
michael@0 | 867 | } |
michael@0 | 868 | return nullptr; |
michael@0 | 869 | } |
michael@0 | 870 | |
michael@0 | 871 | void |
michael@0 | 872 | MediaCache::SwapBlocks(int32_t aBlockIndex1, int32_t aBlockIndex2) |
michael@0 | 873 | { |
michael@0 | 874 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 875 | |
michael@0 | 876 | Block* block1 = &mIndex[aBlockIndex1]; |
michael@0 | 877 | Block* block2 = &mIndex[aBlockIndex2]; |
michael@0 | 878 | |
michael@0 | 879 | block1->mOwners.SwapElements(block2->mOwners); |
michael@0 | 880 | |
michael@0 | 881 | // Now all references to block1 have to be replaced with block2 and |
michael@0 | 882 | // vice versa. |
michael@0 | 883 | // First update stream references to blocks via mBlocks. |
michael@0 | 884 | const Block* blocks[] = { block1, block2 }; |
michael@0 | 885 | int32_t blockIndices[] = { aBlockIndex1, aBlockIndex2 }; |
michael@0 | 886 | for (int32_t i = 0; i < 2; ++i) { |
michael@0 | 887 | for (uint32_t j = 0; j < blocks[i]->mOwners.Length(); ++j) { |
michael@0 | 888 | const BlockOwner* b = &blocks[i]->mOwners[j]; |
michael@0 | 889 | b->mStream->mBlocks[b->mStreamBlock] = blockIndices[i]; |
michael@0 | 890 | } |
michael@0 | 891 | } |
michael@0 | 892 | |
michael@0 | 893 | // Now update references to blocks in block lists. |
michael@0 | 894 | mFreeBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); |
michael@0 | 895 | |
michael@0 | 896 | nsTHashtable<nsPtrHashKey<MediaCacheStream> > visitedStreams; |
michael@0 | 897 | |
michael@0 | 898 | for (int32_t i = 0; i < 2; ++i) { |
michael@0 | 899 | for (uint32_t j = 0; j < blocks[i]->mOwners.Length(); ++j) { |
michael@0 | 900 | MediaCacheStream* stream = blocks[i]->mOwners[j].mStream; |
michael@0 | 901 | // Make sure that we don't update the same stream twice --- that |
michael@0 | 902 | // would result in swapping the block references back again! |
michael@0 | 903 | if (visitedStreams.GetEntry(stream)) |
michael@0 | 904 | continue; |
michael@0 | 905 | visitedStreams.PutEntry(stream); |
michael@0 | 906 | stream->mReadaheadBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); |
michael@0 | 907 | stream->mPlayedBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); |
michael@0 | 908 | stream->mMetadataBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2); |
michael@0 | 909 | } |
michael@0 | 910 | } |
michael@0 | 911 | |
michael@0 | 912 | Verify(); |
michael@0 | 913 | } |
michael@0 | 914 | |
michael@0 | 915 | void |
michael@0 | 916 | MediaCache::RemoveBlockOwner(int32_t aBlockIndex, MediaCacheStream* aStream) |
michael@0 | 917 | { |
michael@0 | 918 | Block* block = &mIndex[aBlockIndex]; |
michael@0 | 919 | for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { |
michael@0 | 920 | BlockOwner* bo = &block->mOwners[i]; |
michael@0 | 921 | if (bo->mStream == aStream) { |
michael@0 | 922 | GetListForBlock(bo)->RemoveBlock(aBlockIndex); |
michael@0 | 923 | bo->mStream->mBlocks[bo->mStreamBlock] = -1; |
michael@0 | 924 | block->mOwners.RemoveElementAt(i); |
michael@0 | 925 | if (block->mOwners.IsEmpty()) { |
michael@0 | 926 | mFreeBlocks.AddFirstBlock(aBlockIndex); |
michael@0 | 927 | } |
michael@0 | 928 | return; |
michael@0 | 929 | } |
michael@0 | 930 | } |
michael@0 | 931 | } |
michael@0 | 932 | |
michael@0 | 933 | void |
michael@0 | 934 | MediaCache::AddBlockOwnerAsReadahead(int32_t aBlockIndex, |
michael@0 | 935 | MediaCacheStream* aStream, |
michael@0 | 936 | int32_t aStreamBlockIndex) |
michael@0 | 937 | { |
michael@0 | 938 | Block* block = &mIndex[aBlockIndex]; |
michael@0 | 939 | if (block->mOwners.IsEmpty()) { |
michael@0 | 940 | mFreeBlocks.RemoveBlock(aBlockIndex); |
michael@0 | 941 | } |
michael@0 | 942 | BlockOwner* bo = block->mOwners.AppendElement(); |
michael@0 | 943 | bo->mStream = aStream; |
michael@0 | 944 | bo->mStreamBlock = aStreamBlockIndex; |
michael@0 | 945 | aStream->mBlocks[aStreamBlockIndex] = aBlockIndex; |
michael@0 | 946 | bo->mClass = READAHEAD_BLOCK; |
michael@0 | 947 | InsertReadaheadBlock(bo, aBlockIndex); |
michael@0 | 948 | } |
michael@0 | 949 | |
michael@0 | 950 | void |
michael@0 | 951 | MediaCache::FreeBlock(int32_t aBlock) |
michael@0 | 952 | { |
michael@0 | 953 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 954 | |
michael@0 | 955 | Block* block = &mIndex[aBlock]; |
michael@0 | 956 | if (block->mOwners.IsEmpty()) { |
michael@0 | 957 | // already free |
michael@0 | 958 | return; |
michael@0 | 959 | } |
michael@0 | 960 | |
michael@0 | 961 | CACHE_LOG(PR_LOG_DEBUG, ("Released block %d", aBlock)); |
michael@0 | 962 | |
michael@0 | 963 | for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { |
michael@0 | 964 | BlockOwner* bo = &block->mOwners[i]; |
michael@0 | 965 | GetListForBlock(bo)->RemoveBlock(aBlock); |
michael@0 | 966 | bo->mStream->mBlocks[bo->mStreamBlock] = -1; |
michael@0 | 967 | } |
michael@0 | 968 | block->mOwners.Clear(); |
michael@0 | 969 | mFreeBlocks.AddFirstBlock(aBlock); |
michael@0 | 970 | Verify(); |
michael@0 | 971 | } |
michael@0 | 972 | |
michael@0 | 973 | TimeDuration |
michael@0 | 974 | MediaCache::PredictNextUse(TimeStamp aNow, int32_t aBlock) |
michael@0 | 975 | { |
michael@0 | 976 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 977 | NS_ASSERTION(!IsBlockFree(aBlock), "aBlock is free"); |
michael@0 | 978 | |
michael@0 | 979 | Block* block = &mIndex[aBlock]; |
michael@0 | 980 | // Blocks can belong to multiple streams. The predicted next use |
michael@0 | 981 | // time is the earliest time predicted by any of the streams. |
michael@0 | 982 | TimeDuration result; |
michael@0 | 983 | for (uint32_t i = 0; i < block->mOwners.Length(); ++i) { |
michael@0 | 984 | BlockOwner* bo = &block->mOwners[i]; |
michael@0 | 985 | TimeDuration prediction; |
michael@0 | 986 | switch (bo->mClass) { |
michael@0 | 987 | case METADATA_BLOCK: |
michael@0 | 988 | // This block should be managed in LRU mode. For metadata we predict |
michael@0 | 989 | // that the time until the next use is the time since the last use. |
michael@0 | 990 | prediction = aNow - bo->mLastUseTime; |
michael@0 | 991 | break; |
michael@0 | 992 | case PLAYED_BLOCK: { |
michael@0 | 993 | // This block should be managed in LRU mode, and we should impose |
michael@0 | 994 | // a "replay delay" to reflect the likelihood of replay happening |
michael@0 | 995 | NS_ASSERTION(static_cast<int64_t>(bo->mStreamBlock)*BLOCK_SIZE < |
michael@0 | 996 | bo->mStream->mStreamOffset, |
michael@0 | 997 | "Played block after the current stream position?"); |
michael@0 | 998 | int64_t bytesBehind = |
michael@0 | 999 | bo->mStream->mStreamOffset - static_cast<int64_t>(bo->mStreamBlock)*BLOCK_SIZE; |
michael@0 | 1000 | int64_t millisecondsBehind = |
michael@0 | 1001 | bytesBehind*1000/bo->mStream->mPlaybackBytesPerSecond; |
michael@0 | 1002 | prediction = TimeDuration::FromMilliseconds( |
michael@0 | 1003 | std::min<int64_t>(millisecondsBehind*REPLAY_PENALTY_FACTOR, INT32_MAX)); |
michael@0 | 1004 | break; |
michael@0 | 1005 | } |
michael@0 | 1006 | case READAHEAD_BLOCK: { |
michael@0 | 1007 | int64_t bytesAhead = |
michael@0 | 1008 | static_cast<int64_t>(bo->mStreamBlock)*BLOCK_SIZE - bo->mStream->mStreamOffset; |
michael@0 | 1009 | NS_ASSERTION(bytesAhead >= 0, |
michael@0 | 1010 | "Readahead block before the current stream position?"); |
michael@0 | 1011 | int64_t millisecondsAhead = |
michael@0 | 1012 | bytesAhead*1000/bo->mStream->mPlaybackBytesPerSecond; |
michael@0 | 1013 | prediction = TimeDuration::FromMilliseconds( |
michael@0 | 1014 | std::min<int64_t>(millisecondsAhead, INT32_MAX)); |
michael@0 | 1015 | break; |
michael@0 | 1016 | } |
michael@0 | 1017 | default: |
michael@0 | 1018 | NS_ERROR("Invalid class for predicting next use"); |
michael@0 | 1019 | return TimeDuration(0); |
michael@0 | 1020 | } |
michael@0 | 1021 | if (i == 0 || prediction < result) { |
michael@0 | 1022 | result = prediction; |
michael@0 | 1023 | } |
michael@0 | 1024 | } |
michael@0 | 1025 | return result; |
michael@0 | 1026 | } |
michael@0 | 1027 | |
michael@0 | 1028 | TimeDuration |
michael@0 | 1029 | MediaCache::PredictNextUseForIncomingData(MediaCacheStream* aStream) |
michael@0 | 1030 | { |
michael@0 | 1031 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 1032 | |
michael@0 | 1033 | int64_t bytesAhead = aStream->mChannelOffset - aStream->mStreamOffset; |
michael@0 | 1034 | if (bytesAhead <= -BLOCK_SIZE) { |
michael@0 | 1035 | // Hmm, no idea when data behind us will be used. Guess 24 hours. |
michael@0 | 1036 | return TimeDuration::FromSeconds(24*60*60); |
michael@0 | 1037 | } |
michael@0 | 1038 | if (bytesAhead <= 0) |
michael@0 | 1039 | return TimeDuration(0); |
michael@0 | 1040 | int64_t millisecondsAhead = bytesAhead*1000/aStream->mPlaybackBytesPerSecond; |
michael@0 | 1041 | return TimeDuration::FromMilliseconds( |
michael@0 | 1042 | std::min<int64_t>(millisecondsAhead, INT32_MAX)); |
michael@0 | 1043 | } |
michael@0 | 1044 | |
michael@0 | 1045 | enum StreamAction { NONE, SEEK, SEEK_AND_RESUME, RESUME, SUSPEND }; |
michael@0 | 1046 | |
michael@0 | 1047 | void |
michael@0 | 1048 | MediaCache::Update() |
michael@0 | 1049 | { |
michael@0 | 1050 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1051 | |
michael@0 | 1052 | // The action to use for each stream. We store these so we can make |
michael@0 | 1053 | // decisions while holding the cache lock but implement those decisions |
michael@0 | 1054 | // without holding the cache lock, since we need to call out to |
michael@0 | 1055 | // stream, decoder and element code. |
michael@0 | 1056 | nsAutoTArray<StreamAction,10> actions; |
michael@0 | 1057 | |
michael@0 | 1058 | { |
michael@0 | 1059 | ReentrantMonitorAutoEnter mon(mReentrantMonitor); |
michael@0 | 1060 | mUpdateQueued = false; |
michael@0 | 1061 | #ifdef DEBUG |
michael@0 | 1062 | mInUpdate = true; |
michael@0 | 1063 | #endif |
michael@0 | 1064 | |
michael@0 | 1065 | int32_t maxBlocks = GetMaxBlocks(); |
michael@0 | 1066 | TimeStamp now = TimeStamp::Now(); |
michael@0 | 1067 | |
michael@0 | 1068 | int32_t freeBlockCount = mFreeBlocks.GetCount(); |
michael@0 | 1069 | TimeDuration latestPredictedUseForOverflow = 0; |
michael@0 | 1070 | if (mIndex.Length() > uint32_t(maxBlocks)) { |
michael@0 | 1071 | // Try to trim back the cache to its desired maximum size. The cache may |
michael@0 | 1072 | // have overflowed simply due to data being received when we have |
michael@0 | 1073 | // no blocks in the main part of the cache that are free or lower |
michael@0 | 1074 | // priority than the new data. The cache can also be overflowing because |
michael@0 | 1075 | // the media.cache_size preference was reduced. |
michael@0 | 1076 | // First, figure out what the least valuable block in the cache overflow |
michael@0 | 1077 | // is. We don't want to replace any blocks in the main part of the |
michael@0 | 1078 | // cache whose expected time of next use is earlier or equal to that. |
michael@0 | 1079 | // If we allow that, we can effectively end up discarding overflowing |
michael@0 | 1080 | // blocks (by moving an overflowing block to the main part of the cache, |
michael@0 | 1081 | // and then overwriting it with another overflowing block), and we try |
michael@0 | 1082 | // to avoid that since it requires HTTP seeks. |
michael@0 | 1083 | // We also use this loop to eliminate overflowing blocks from |
michael@0 | 1084 | // freeBlockCount. |
michael@0 | 1085 | for (int32_t blockIndex = mIndex.Length() - 1; blockIndex >= maxBlocks; |
michael@0 | 1086 | --blockIndex) { |
michael@0 | 1087 | if (IsBlockFree(blockIndex)) { |
michael@0 | 1088 | // Don't count overflowing free blocks in our free block count |
michael@0 | 1089 | --freeBlockCount; |
michael@0 | 1090 | continue; |
michael@0 | 1091 | } |
michael@0 | 1092 | TimeDuration predictedUse = PredictNextUse(now, blockIndex); |
michael@0 | 1093 | latestPredictedUseForOverflow = std::max(latestPredictedUseForOverflow, predictedUse); |
michael@0 | 1094 | } |
michael@0 | 1095 | } else { |
michael@0 | 1096 | freeBlockCount += maxBlocks - mIndex.Length(); |
michael@0 | 1097 | } |
michael@0 | 1098 | |
michael@0 | 1099 | // Now try to move overflowing blocks to the main part of the cache. |
michael@0 | 1100 | for (int32_t blockIndex = mIndex.Length() - 1; blockIndex >= maxBlocks; |
michael@0 | 1101 | --blockIndex) { |
michael@0 | 1102 | if (IsBlockFree(blockIndex)) |
michael@0 | 1103 | continue; |
michael@0 | 1104 | |
michael@0 | 1105 | Block* block = &mIndex[blockIndex]; |
michael@0 | 1106 | // Try to relocate the block close to other blocks for the first stream. |
michael@0 | 1107 | // There is no point in trying to make it close to other blocks in |
michael@0 | 1108 | // *all* the streams it might belong to. |
michael@0 | 1109 | int32_t destinationBlockIndex = |
michael@0 | 1110 | FindReusableBlock(now, block->mOwners[0].mStream, |
michael@0 | 1111 | block->mOwners[0].mStreamBlock, maxBlocks); |
michael@0 | 1112 | if (destinationBlockIndex < 0) { |
michael@0 | 1113 | // Nowhere to place this overflow block. We won't be able to |
michael@0 | 1114 | // place any more overflow blocks. |
michael@0 | 1115 | break; |
michael@0 | 1116 | } |
michael@0 | 1117 | |
michael@0 | 1118 | if (IsBlockFree(destinationBlockIndex) || |
michael@0 | 1119 | PredictNextUse(now, destinationBlockIndex) > latestPredictedUseForOverflow) { |
michael@0 | 1120 | // Reuse blocks in the main part of the cache that are less useful than |
michael@0 | 1121 | // the least useful overflow blocks |
michael@0 | 1122 | |
michael@0 | 1123 | nsresult rv = mFileCache->MoveBlock(blockIndex, destinationBlockIndex); |
michael@0 | 1124 | |
michael@0 | 1125 | if (NS_SUCCEEDED(rv)) { |
michael@0 | 1126 | // We successfully copied the file data. |
michael@0 | 1127 | CACHE_LOG(PR_LOG_DEBUG, ("Swapping blocks %d and %d (trimming cache)", |
michael@0 | 1128 | blockIndex, destinationBlockIndex)); |
michael@0 | 1129 | // Swapping the block metadata here lets us maintain the |
michael@0 | 1130 | // correct positions in the linked lists |
michael@0 | 1131 | SwapBlocks(blockIndex, destinationBlockIndex); |
michael@0 | 1132 | // Free the overflowing block now that its data has been moved. |
michael@0 | 1133 | CACHE_LOG(PR_LOG_DEBUG, ("Released block %d (trimming cache)", blockIndex)); |
michael@0 | 1134 | FreeBlock(blockIndex); |
michael@0 | 1135 | } |
michael@0 | 1136 | } else { |
michael@0 | 1137 | CACHE_LOG(PR_LOG_DEBUG, ("Could not trim cache block %d (destination %d, predicted next use %f, latest predicted use for overflow %f)", |
michael@0 | 1138 | blockIndex, destinationBlockIndex, |
michael@0 | 1139 | PredictNextUse(now, destinationBlockIndex).ToSeconds(), |
michael@0 | 1140 | latestPredictedUseForOverflow.ToSeconds())); |
michael@0 | 1141 | } |
michael@0 | 1142 | } |
michael@0 | 1143 | // Try chopping back the array of cache entries and the cache file. |
michael@0 | 1144 | Truncate(); |
michael@0 | 1145 | |
michael@0 | 1146 | // Count the blocks allocated for readahead of non-seekable streams |
michael@0 | 1147 | // (these blocks can't be freed but we don't want them to monopolize the |
michael@0 | 1148 | // cache) |
michael@0 | 1149 | int32_t nonSeekableReadaheadBlockCount = 0; |
michael@0 | 1150 | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
michael@0 | 1151 | MediaCacheStream* stream = mStreams[i]; |
michael@0 | 1152 | if (!stream->mIsTransportSeekable) { |
michael@0 | 1153 | nonSeekableReadaheadBlockCount += stream->mReadaheadBlocks.GetCount(); |
michael@0 | 1154 | } |
michael@0 | 1155 | } |
michael@0 | 1156 | |
michael@0 | 1157 | // If freeBlockCount is zero, then compute the latest of |
michael@0 | 1158 | // the predicted next-uses for all blocks |
michael@0 | 1159 | TimeDuration latestNextUse; |
michael@0 | 1160 | if (freeBlockCount == 0) { |
michael@0 | 1161 | int32_t reusableBlock = FindReusableBlock(now, nullptr, 0, maxBlocks); |
michael@0 | 1162 | if (reusableBlock >= 0) { |
michael@0 | 1163 | latestNextUse = PredictNextUse(now, reusableBlock); |
michael@0 | 1164 | } |
michael@0 | 1165 | } |
michael@0 | 1166 | |
michael@0 | 1167 | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
michael@0 | 1168 | actions.AppendElement(NONE); |
michael@0 | 1169 | |
michael@0 | 1170 | MediaCacheStream* stream = mStreams[i]; |
michael@0 | 1171 | if (stream->mClosed) |
michael@0 | 1172 | continue; |
michael@0 | 1173 | |
michael@0 | 1174 | // Figure out where we should be reading from. It's the first |
michael@0 | 1175 | // uncached byte after the current mStreamOffset. |
michael@0 | 1176 | int64_t dataOffset = stream->GetCachedDataEndInternal(stream->mStreamOffset); |
michael@0 | 1177 | MOZ_ASSERT(dataOffset >= 0); |
michael@0 | 1178 | |
michael@0 | 1179 | // Compute where we'd actually seek to in order to read at dataOffset |
michael@0 | 1180 | int64_t desiredOffset = dataOffset; |
michael@0 | 1181 | if (stream->mIsTransportSeekable) { |
michael@0 | 1182 | if (desiredOffset > stream->mChannelOffset && |
michael@0 | 1183 | desiredOffset <= stream->mChannelOffset + SEEK_VS_READ_THRESHOLD) { |
michael@0 | 1184 | // Assume it's more efficient to just keep reading up to the |
michael@0 | 1185 | // desired position instead of trying to seek |
michael@0 | 1186 | desiredOffset = stream->mChannelOffset; |
michael@0 | 1187 | } |
michael@0 | 1188 | } else { |
michael@0 | 1189 | // We can't seek directly to the desired offset... |
michael@0 | 1190 | if (stream->mChannelOffset > desiredOffset) { |
michael@0 | 1191 | // Reading forward won't get us anywhere, we need to go backwards. |
michael@0 | 1192 | // Seek back to 0 (the client will reopen the stream) and then |
michael@0 | 1193 | // read forward. |
michael@0 | 1194 | NS_WARNING("Can't seek backwards, so seeking to 0"); |
michael@0 | 1195 | desiredOffset = 0; |
michael@0 | 1196 | // Flush cached blocks out, since if this is a live stream |
michael@0 | 1197 | // the cached data may be completely different next time we |
michael@0 | 1198 | // read it. We have to assume that live streams don't |
michael@0 | 1199 | // advertise themselves as being seekable... |
michael@0 | 1200 | ReleaseStreamBlocks(stream); |
michael@0 | 1201 | } else { |
michael@0 | 1202 | // otherwise reading forward is looking good, so just stay where we |
michael@0 | 1203 | // are and don't trigger a channel seek! |
michael@0 | 1204 | desiredOffset = stream->mChannelOffset; |
michael@0 | 1205 | } |
michael@0 | 1206 | } |
michael@0 | 1207 | |
michael@0 | 1208 | // Figure out if we should be reading data now or not. It's amazing |
michael@0 | 1209 | // how complex this is, but each decision is simple enough. |
michael@0 | 1210 | bool enableReading; |
michael@0 | 1211 | if (stream->mStreamLength >= 0 && dataOffset >= stream->mStreamLength) { |
michael@0 | 1212 | // We want data at the end of the stream, where there's nothing to |
michael@0 | 1213 | // read. We don't want to try to read if we're suspended, because that |
michael@0 | 1214 | // might create a new channel and seek unnecessarily (and incorrectly, |
michael@0 | 1215 | // since HTTP doesn't allow seeking to the actual EOF), and we don't want |
michael@0 | 1216 | // to suspend if we're not suspended and already reading at the end of |
michael@0 | 1217 | // the stream, since there just might be more data than the server |
michael@0 | 1218 | // advertised with Content-Length, and we may as well keep reading. |
michael@0 | 1219 | // But we don't want to seek to the end of the stream if we're not |
michael@0 | 1220 | // already there. |
michael@0 | 1221 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p at end of stream", stream)); |
michael@0 | 1222 | enableReading = !stream->mCacheSuspended && |
michael@0 | 1223 | stream->mStreamLength == stream->mChannelOffset; |
michael@0 | 1224 | } else if (desiredOffset < stream->mStreamOffset) { |
michael@0 | 1225 | // We're reading to try to catch up to where the current stream |
michael@0 | 1226 | // reader wants to be. Better not stop. |
michael@0 | 1227 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p catching up", stream)); |
michael@0 | 1228 | enableReading = true; |
michael@0 | 1229 | } else if (desiredOffset < stream->mStreamOffset + BLOCK_SIZE) { |
michael@0 | 1230 | // The stream reader is waiting for us, or nearly so. Better feed it. |
michael@0 | 1231 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p feeding reader", stream)); |
michael@0 | 1232 | enableReading = true; |
michael@0 | 1233 | } else if (!stream->mIsTransportSeekable && |
michael@0 | 1234 | nonSeekableReadaheadBlockCount >= maxBlocks*NONSEEKABLE_READAHEAD_MAX) { |
michael@0 | 1235 | // This stream is not seekable and there are already too many blocks |
michael@0 | 1236 | // being cached for readahead for nonseekable streams (which we can't |
michael@0 | 1237 | // free). So stop reading ahead now. |
michael@0 | 1238 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p throttling non-seekable readahead", stream)); |
michael@0 | 1239 | enableReading = false; |
michael@0 | 1240 | } else if (mIndex.Length() > uint32_t(maxBlocks)) { |
michael@0 | 1241 | // We're in the process of bringing the cache size back to the |
michael@0 | 1242 | // desired limit, so don't bring in more data yet |
michael@0 | 1243 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p throttling to reduce cache size", stream)); |
michael@0 | 1244 | enableReading = false; |
michael@0 | 1245 | } else { |
michael@0 | 1246 | TimeDuration predictedNewDataUse = PredictNextUseForIncomingData(stream); |
michael@0 | 1247 | |
michael@0 | 1248 | if (stream->mCacheSuspended && |
michael@0 | 1249 | predictedNewDataUse.ToMilliseconds() > CACHE_POWERSAVE_WAKEUP_LOW_THRESHOLD_MS) { |
michael@0 | 1250 | // Don't need data for a while, so don't bother waking up the stream |
michael@0 | 1251 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p avoiding wakeup since more data is not needed", stream)); |
michael@0 | 1252 | enableReading = false; |
michael@0 | 1253 | } else if (freeBlockCount > 0) { |
michael@0 | 1254 | // Free blocks in the cache, so keep reading |
michael@0 | 1255 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p reading since there are free blocks", stream)); |
michael@0 | 1256 | enableReading = true; |
michael@0 | 1257 | } else if (latestNextUse <= TimeDuration(0)) { |
michael@0 | 1258 | // No reusable blocks, so can't read anything |
michael@0 | 1259 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p throttling due to no reusable blocks", stream)); |
michael@0 | 1260 | enableReading = false; |
michael@0 | 1261 | } else { |
michael@0 | 1262 | // Read ahead if the data we expect to read is more valuable than |
michael@0 | 1263 | // the least valuable block in the main part of the cache |
michael@0 | 1264 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p predict next data in %f, current worst block is %f", |
michael@0 | 1265 | stream, predictedNewDataUse.ToSeconds(), latestNextUse.ToSeconds())); |
michael@0 | 1266 | enableReading = predictedNewDataUse < latestNextUse; |
michael@0 | 1267 | } |
michael@0 | 1268 | } |
michael@0 | 1269 | |
michael@0 | 1270 | if (enableReading) { |
michael@0 | 1271 | for (uint32_t j = 0; j < i; ++j) { |
michael@0 | 1272 | MediaCacheStream* other = mStreams[j]; |
michael@0 | 1273 | if (other->mResourceID == stream->mResourceID && |
michael@0 | 1274 | !other->mClient->IsSuspended() && |
michael@0 | 1275 | other->mChannelOffset/BLOCK_SIZE == desiredOffset/BLOCK_SIZE) { |
michael@0 | 1276 | // This block is already going to be read by the other stream. |
michael@0 | 1277 | // So don't try to read it from this stream as well. |
michael@0 | 1278 | enableReading = false; |
michael@0 | 1279 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p waiting on same block (%lld) from stream %p", |
michael@0 | 1280 | stream, desiredOffset/BLOCK_SIZE, other)); |
michael@0 | 1281 | break; |
michael@0 | 1282 | } |
michael@0 | 1283 | } |
michael@0 | 1284 | } |
michael@0 | 1285 | |
michael@0 | 1286 | if (stream->mChannelOffset != desiredOffset && enableReading) { |
michael@0 | 1287 | // We need to seek now. |
michael@0 | 1288 | NS_ASSERTION(stream->mIsTransportSeekable || desiredOffset == 0, |
michael@0 | 1289 | "Trying to seek in a non-seekable stream!"); |
michael@0 | 1290 | // Round seek offset down to the start of the block. This is essential |
michael@0 | 1291 | // because we don't want to think we have part of a block already |
michael@0 | 1292 | // in mPartialBlockBuffer. |
michael@0 | 1293 | stream->mChannelOffset = (desiredOffset/BLOCK_SIZE)*BLOCK_SIZE; |
michael@0 | 1294 | actions[i] = stream->mCacheSuspended ? SEEK_AND_RESUME : SEEK; |
michael@0 | 1295 | } else if (enableReading && stream->mCacheSuspended) { |
michael@0 | 1296 | actions[i] = RESUME; |
michael@0 | 1297 | } else if (!enableReading && !stream->mCacheSuspended) { |
michael@0 | 1298 | actions[i] = SUSPEND; |
michael@0 | 1299 | } |
michael@0 | 1300 | } |
michael@0 | 1301 | #ifdef DEBUG |
michael@0 | 1302 | mInUpdate = false; |
michael@0 | 1303 | #endif |
michael@0 | 1304 | } |
michael@0 | 1305 | |
michael@0 | 1306 | // Update the channel state without holding our cache lock. While we're |
michael@0 | 1307 | // doing this, decoder threads may be running and seeking, reading or changing |
michael@0 | 1308 | // other cache state. That's OK, they'll trigger new Update events and we'll |
michael@0 | 1309 | // get back here and revise our decisions. The important thing here is that |
michael@0 | 1310 | // performing these actions only depends on mChannelOffset and |
michael@0 | 1311 | // the action, which can only be written by the main thread (i.e., this |
michael@0 | 1312 | // thread), so we don't have races here. |
michael@0 | 1313 | |
michael@0 | 1314 | // First, update the mCacheSuspended/mChannelEnded flags so that they're all correct |
michael@0 | 1315 | // when we fire our CacheClient commands below. Those commands can rely on these flags |
michael@0 | 1316 | // being set correctly for all streams. |
michael@0 | 1317 | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
michael@0 | 1318 | MediaCacheStream* stream = mStreams[i]; |
michael@0 | 1319 | switch (actions[i]) { |
michael@0 | 1320 | case SEEK: |
michael@0 | 1321 | case SEEK_AND_RESUME: |
michael@0 | 1322 | stream->mCacheSuspended = false; |
michael@0 | 1323 | stream->mChannelEnded = false; |
michael@0 | 1324 | break; |
michael@0 | 1325 | case RESUME: |
michael@0 | 1326 | stream->mCacheSuspended = false; |
michael@0 | 1327 | break; |
michael@0 | 1328 | case SUSPEND: |
michael@0 | 1329 | stream->mCacheSuspended = true; |
michael@0 | 1330 | break; |
michael@0 | 1331 | default: |
michael@0 | 1332 | break; |
michael@0 | 1333 | } |
michael@0 | 1334 | stream->mHasHadUpdate = true; |
michael@0 | 1335 | } |
michael@0 | 1336 | |
michael@0 | 1337 | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
michael@0 | 1338 | MediaCacheStream* stream = mStreams[i]; |
michael@0 | 1339 | nsresult rv; |
michael@0 | 1340 | switch (actions[i]) { |
michael@0 | 1341 | case SEEK: |
michael@0 | 1342 | case SEEK_AND_RESUME: |
michael@0 | 1343 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p CacheSeek to %lld (resume=%d)", stream, |
michael@0 | 1344 | (long long)stream->mChannelOffset, actions[i] == SEEK_AND_RESUME)); |
michael@0 | 1345 | rv = stream->mClient->CacheClientSeek(stream->mChannelOffset, |
michael@0 | 1346 | actions[i] == SEEK_AND_RESUME); |
michael@0 | 1347 | break; |
michael@0 | 1348 | case RESUME: |
michael@0 | 1349 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p Resumed", stream)); |
michael@0 | 1350 | rv = stream->mClient->CacheClientResume(); |
michael@0 | 1351 | break; |
michael@0 | 1352 | case SUSPEND: |
michael@0 | 1353 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p Suspended", stream)); |
michael@0 | 1354 | rv = stream->mClient->CacheClientSuspend(); |
michael@0 | 1355 | break; |
michael@0 | 1356 | default: |
michael@0 | 1357 | rv = NS_OK; |
michael@0 | 1358 | break; |
michael@0 | 1359 | } |
michael@0 | 1360 | |
michael@0 | 1361 | if (NS_FAILED(rv)) { |
michael@0 | 1362 | // Close the streams that failed due to error. This will cause all |
michael@0 | 1363 | // client Read and Seek operations on those streams to fail. Blocked |
michael@0 | 1364 | // Reads will also be woken up. |
michael@0 | 1365 | ReentrantMonitorAutoEnter mon(mReentrantMonitor); |
michael@0 | 1366 | stream->CloseInternal(mon); |
michael@0 | 1367 | } |
michael@0 | 1368 | } |
michael@0 | 1369 | } |
michael@0 | 1370 | |
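michael@0 |      | // Runnable used by QueueUpdate() to run MediaCache::Update() asynchronously |
michael@0 |      | // on the main thread, provided the cache still exists when it runs. |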
michael@0 | 1371 | class UpdateEvent : public nsRunnable |
michael@0 | 1372 | { |
michael@0 | 1373 | public: |
michael@0 | 1374 | NS_IMETHOD Run() |
michael@0 | 1375 | { |
michael@0 | 1376 | if (gMediaCache) { |
michael@0 | 1377 | gMediaCache->Update(); |
michael@0 | 1378 | } |
michael@0 | 1379 | return NS_OK; |
michael@0 | 1380 | } |
michael@0 | 1381 | }; |
michael@0 | 1382 | |
michael@0 | 1383 | void |
michael@0 | 1384 | MediaCache::QueueUpdate() |
michael@0 | 1385 | { |
michael@0 | 1386 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 1387 | |
michael@0 | 1388 | // Queuing an update while we're in an update raises a high risk of |
michael@0 | 1389 | // triggering endless events |
michael@0 | 1390 | NS_ASSERTION(!mInUpdate, |
michael@0 | 1391 | "Queuing an update while we're in an update"); |
michael@0 | 1392 | if (mUpdateQueued) |
michael@0 | 1393 | return; |
michael@0 | 1394 | mUpdateQueued = true; |
michael@0 | 1395 | nsCOMPtr<nsIRunnable> event = new UpdateEvent(); |
michael@0 | 1396 | NS_DispatchToMainThread(event); |
michael@0 | 1397 | } |
michael@0 | 1398 | |
michael@0 | 1399 | #ifdef DEBUG_VERIFY_CACHE |
michael@0 | 1400 | void |
michael@0 | 1401 | MediaCache::Verify() |
michael@0 | 1402 | { |
michael@0 | 1403 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 1404 | |
michael@0 | 1405 | mFreeBlocks.Verify(); |
michael@0 | 1406 | for (uint32_t i = 0; i < mStreams.Length(); ++i) { |
michael@0 | 1407 | MediaCacheStream* stream = mStreams[i]; |
michael@0 | 1408 | stream->mReadaheadBlocks.Verify(); |
michael@0 | 1409 | stream->mPlayedBlocks.Verify(); |
michael@0 | 1410 | stream->mMetadataBlocks.Verify(); |
michael@0 | 1411 | |
michael@0 | 1412 | // Verify that the readahead blocks are listed in stream block order |
michael@0 | 1413 | int32_t block = stream->mReadaheadBlocks.GetFirstBlock(); |
michael@0 | 1414 | int32_t lastStreamBlock = -1; |
michael@0 | 1415 | while (block >= 0) { |
michael@0 | 1416 | uint32_t j = 0; |
michael@0 | 1417 | while (mIndex[block].mOwners[j].mStream != stream) { |
michael@0 | 1418 | ++j; |
michael@0 | 1419 | } |
michael@0 | 1420 | int32_t nextStreamBlock = |
michael@0 | 1421 | int32_t(mIndex[block].mOwners[j].mStreamBlock); |
michael@0 | 1422 | NS_ASSERTION(lastStreamBlock < nextStreamBlock, |
michael@0 | 1423 | "Blocks not increasing in readahead stream"); |
michael@0 | 1424 | lastStreamBlock = nextStreamBlock; |
michael@0 | 1425 | block = stream->mReadaheadBlocks.GetNextBlock(block); |
michael@0 | 1426 | } |
michael@0 | 1427 | } |
michael@0 | 1428 | } |
michael@0 | 1429 | #endif |
michael@0 | 1430 | |
michael@0 | 1431 | void |
michael@0 | 1432 | MediaCache::InsertReadaheadBlock(BlockOwner* aBlockOwner, |
michael@0 | 1433 | int32_t aBlockIndex) |
michael@0 | 1434 | { |
michael@0 | 1435 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 1436 | |
michael@0 | 1437 | // Find the last block whose stream block is before aBlockIndex's |
michael@0 | 1438 | // stream block, and insert after it |
michael@0 | 1439 | MediaCacheStream* stream = aBlockOwner->mStream; |
michael@0 | 1440 | int32_t readaheadIndex = stream->mReadaheadBlocks.GetLastBlock(); |
michael@0 | 1441 | while (readaheadIndex >= 0) { |
michael@0 | 1442 | BlockOwner* bo = GetBlockOwner(readaheadIndex, stream); |
michael@0 | 1443 | NS_ASSERTION(bo, "stream must own its blocks"); |
michael@0 | 1444 | if (bo->mStreamBlock < aBlockOwner->mStreamBlock) { |
michael@0 | 1445 | stream->mReadaheadBlocks.AddAfter(aBlockIndex, readaheadIndex); |
michael@0 | 1446 | return; |
michael@0 | 1447 | } |
michael@0 | 1448 | NS_ASSERTION(bo->mStreamBlock > aBlockOwner->mStreamBlock, |
michael@0 | 1449 | "Duplicated blocks??"); |
michael@0 | 1450 | readaheadIndex = stream->mReadaheadBlocks.GetPrevBlock(readaheadIndex); |
michael@0 | 1451 | } |
michael@0 | 1452 | |
michael@0 | 1453 | stream->mReadaheadBlocks.AddFirstBlock(aBlockIndex); |
michael@0 | 1454 | Verify(); |
michael@0 | 1455 | } |
michael@0 | 1456 | |
michael@0 | 1457 | void |
michael@0 | 1458 | MediaCache::AllocateAndWriteBlock(MediaCacheStream* aStream, const void* aData, |
michael@0 | 1459 | MediaCacheStream::ReadMode aMode) |
michael@0 | 1460 | { |
michael@0 | 1461 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 1462 | |
michael@0 | 1463 | int32_t streamBlockIndex = aStream->mChannelOffset/BLOCK_SIZE; |
michael@0 | 1464 | |
michael@0 | 1465 | // Remove all cached copies of this block |
michael@0 | 1466 | ResourceStreamIterator iter(aStream->mResourceID); |
michael@0 | 1467 | while (MediaCacheStream* stream = iter.Next()) { |
michael@0 | 1468 | while (streamBlockIndex >= int32_t(stream->mBlocks.Length())) { |
michael@0 | 1469 | stream->mBlocks.AppendElement(-1); |
michael@0 | 1470 | } |
michael@0 | 1471 | if (stream->mBlocks[streamBlockIndex] >= 0) { |
michael@0 | 1472 | // We no longer want to own this block |
michael@0 | 1473 | int32_t globalBlockIndex = stream->mBlocks[streamBlockIndex]; |
michael@0 | 1474 | CACHE_LOG(PR_LOG_DEBUG, ("Released block %d from stream %p block %d(%lld)", |
michael@0 | 1475 | globalBlockIndex, stream, streamBlockIndex, (long long)streamBlockIndex*BLOCK_SIZE)); |
michael@0 | 1476 | RemoveBlockOwner(globalBlockIndex, stream); |
michael@0 | 1477 | } |
michael@0 | 1478 | } |
michael@0 | 1479 | |
michael@0 | 1480 | // Now find a cache block to hold the incoming data. |
michael@0 | 1481 | |
michael@0 | 1482 | TimeStamp now = TimeStamp::Now(); |
michael@0 | 1483 | int32_t blockIndex = FindBlockForIncomingData(now, aStream); |
michael@0 | 1484 | if (blockIndex >= 0) { |
michael@0 | 1485 | FreeBlock(blockIndex); |
michael@0 | 1486 | |
michael@0 | 1487 | Block* block = &mIndex[blockIndex]; |
michael@0 | 1488 | CACHE_LOG(PR_LOG_DEBUG, ("Allocated block %d to stream %p block %d(%lld)", |
michael@0 | 1489 | blockIndex, aStream, streamBlockIndex, (long long)streamBlockIndex*BLOCK_SIZE)); |
michael@0 | 1490 | |
michael@0 | 1491 | mFreeBlocks.RemoveBlock(blockIndex); |
michael@0 | 1492 | |
michael@0 | 1493 | // Tell each stream using this resource about the new block. |
michael@0 | 1494 | ResourceStreamIterator iter(aStream->mResourceID); |
michael@0 | 1495 | while (MediaCacheStream* stream = iter.Next()) { |
michael@0 | 1496 | BlockOwner* bo = block->mOwners.AppendElement(); |
michael@0 | 1497 | if (!bo) |
michael@0 | 1498 | return; |
michael@0 | 1499 | |
michael@0 | 1500 | bo->mStream = stream; |
michael@0 | 1501 | bo->mStreamBlock = streamBlockIndex; |
michael@0 | 1502 | bo->mLastUseTime = now; |
michael@0 | 1503 | stream->mBlocks[streamBlockIndex] = blockIndex; |
michael@0 | 1504 | if (streamBlockIndex*BLOCK_SIZE < stream->mStreamOffset) { |
michael@0 | 1505 | bo->mClass = aMode == MediaCacheStream::MODE_PLAYBACK |
michael@0 | 1506 | ? PLAYED_BLOCK : METADATA_BLOCK; |
michael@0 | 1507 | // This must be the most-recently-used block, since we |
michael@0 | 1508 | // marked it as used now (which may be slightly bogus, but we'll |
michael@0 | 1509 | // treat it as used for simplicity). |
michael@0 | 1510 | GetListForBlock(bo)->AddFirstBlock(blockIndex); |
michael@0 | 1511 | Verify(); |
michael@0 | 1512 | } else { |
michael@0 | 1513 | // This may not be the latest readahead block, although it usually |
michael@0 | 1514 | // will be. We may have to scan for the right place to insert |
michael@0 | 1515 | // the block in the list. |
michael@0 | 1516 | bo->mClass = READAHEAD_BLOCK; |
michael@0 | 1517 | InsertReadaheadBlock(bo, blockIndex); |
michael@0 | 1518 | } |
michael@0 | 1519 | } |
michael@0 | 1520 | |
michael@0 | 1521 | nsresult rv = mFileCache->WriteBlock(blockIndex, reinterpret_cast<const uint8_t*>(aData)); |
michael@0 | 1522 | if (NS_FAILED(rv)) { |
michael@0 | 1523 | CACHE_LOG(PR_LOG_DEBUG, ("Released block %d from stream %p block %d(%lld)", |
michael@0 | 1524 | blockIndex, aStream, streamBlockIndex, (long long)streamBlockIndex*BLOCK_SIZE)); |
michael@0 | 1525 | FreeBlock(blockIndex); |
michael@0 | 1526 | } |
michael@0 | 1527 | } |
michael@0 | 1528 | |
michael@0 | 1529 | // Queue an Update since the cache state has changed (for example |
michael@0 | 1530 | // we might want to stop loading because the cache is full) |
michael@0 | 1531 | QueueUpdate(); |
michael@0 | 1532 | } |
michael@0 | 1533 | |
michael@0 | 1534 | void |
michael@0 | 1535 | MediaCache::OpenStream(MediaCacheStream* aStream) |
michael@0 | 1536 | { |
michael@0 | 1537 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1538 | |
michael@0 | 1539 | ReentrantMonitorAutoEnter mon(mReentrantMonitor); |
michael@0 | 1540 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p opened", aStream)); |
michael@0 | 1541 | mStreams.AppendElement(aStream); |
michael@0 | 1542 | aStream->mResourceID = AllocateResourceID(); |
michael@0 | 1543 | |
michael@0 | 1544 | // Queue an update since a new stream has been opened. |
michael@0 | 1545 | gMediaCache->QueueUpdate(); |
michael@0 | 1546 | } |
michael@0 | 1547 | |
michael@0 | 1548 | void |
michael@0 | 1549 | MediaCache::ReleaseStream(MediaCacheStream* aStream) |
michael@0 | 1550 | { |
michael@0 | 1551 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1552 | |
michael@0 | 1553 | ReentrantMonitorAutoEnter mon(mReentrantMonitor); |
michael@0 | 1554 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p closed", aStream)); |
michael@0 | 1555 | mStreams.RemoveElement(aStream); |
michael@0 | 1556 | |
michael@0 | 1557 | // Update MediaCache again since |mStreams| has changed. |
michael@0 | 1558 | // We need to re-run Update() to ensure streams reading from the same resource |
michael@0 | 1559 | // as the removed stream get a chance to continue reading. |
michael@0 | 1560 | gMediaCache->QueueUpdate(); |
michael@0 | 1561 | } |
michael@0 | 1562 | |
michael@0 | 1563 | void |
michael@0 | 1564 | MediaCache::ReleaseStreamBlocks(MediaCacheStream* aStream) |
michael@0 | 1565 | { |
michael@0 | 1566 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 1567 | |
michael@0 | 1568 | // XXX scanning the entire stream doesn't seem great, if not much of it |
michael@0 | 1569 | // is cached, but the only easy alternative is to scan the entire cache |
michael@0 | 1570 | // which isn't better |
michael@0 | 1571 | uint32_t length = aStream->mBlocks.Length(); |
michael@0 | 1572 | for (uint32_t i = 0; i < length; ++i) { |
michael@0 | 1573 | int32_t blockIndex = aStream->mBlocks[i]; |
michael@0 | 1574 | if (blockIndex >= 0) { |
michael@0 | 1575 | CACHE_LOG(PR_LOG_DEBUG, ("Released block %d from stream %p block %d(%lld)", |
michael@0 | 1576 | blockIndex, aStream, i, (long long)i*BLOCK_SIZE)); |
michael@0 | 1577 | RemoveBlockOwner(blockIndex, aStream); |
michael@0 | 1578 | } |
michael@0 | 1579 | } |
michael@0 | 1580 | } |
michael@0 | 1581 | |
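michael@0 |      | // Trim trailing free blocks from the end of the cache index (mIndex), so the |
michael@0 |      | // index doesn't keep growing once blocks near the end have been freed. |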
michael@0 | 1582 | void |
michael@0 | 1583 | MediaCache::Truncate() |
michael@0 | 1584 | { |
michael@0 | 1585 | uint32_t end; |
michael@0 | 1586 | for (end = mIndex.Length(); end > 0; --end) { |
michael@0 | 1587 | if (!IsBlockFree(end - 1)) |
michael@0 | 1588 | break; |
michael@0 | 1589 | mFreeBlocks.RemoveBlock(end - 1); |
michael@0 | 1590 | } |
michael@0 | 1591 | |
michael@0 | 1592 | if (end < mIndex.Length()) { |
michael@0 | 1593 | mIndex.TruncateLength(end); |
michael@0 | 1594 | // XXX We could truncate the cache file here, but we don't seem |
michael@0 | 1595 | // to have a cross-platform API for doing that. At least when all |
michael@0 | 1596 | // streams are closed we shut down the cache, which erases the |
michael@0 | 1597 | // file at that point. |
michael@0 | 1598 | } |
michael@0 | 1599 | } |
michael@0 | 1600 | |
michael@0 | 1601 | void |
michael@0 | 1602 | MediaCache::NoteBlockUsage(MediaCacheStream* aStream, int32_t aBlockIndex, |
michael@0 | 1603 | MediaCacheStream::ReadMode aMode, |
michael@0 | 1604 | TimeStamp aNow) |
michael@0 | 1605 | { |
michael@0 | 1606 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 1607 | |
michael@0 | 1608 | if (aBlockIndex < 0) { |
michael@0 | 1609 | // this block is not in the cache yet |
michael@0 | 1610 | return; |
michael@0 | 1611 | } |
michael@0 | 1612 | |
michael@0 | 1613 | BlockOwner* bo = GetBlockOwner(aBlockIndex, aStream); |
michael@0 | 1614 | if (!bo) { |
michael@0 | 1615 | // this block is not in the cache yet |
michael@0 | 1616 | return; |
michael@0 | 1617 | } |
michael@0 | 1618 | |
michael@0 | 1619 | // The following check has to be <= because the stream offset has |
michael@0 | 1620 | // not yet been updated for the data read from this block |
michael@0 | 1621 | NS_ASSERTION(bo->mStreamBlock*BLOCK_SIZE <= bo->mStream->mStreamOffset, |
michael@0 | 1622 | "Using a block that's behind the read position?"); |
michael@0 | 1623 | |
michael@0 | 1624 | GetListForBlock(bo)->RemoveBlock(aBlockIndex); |
michael@0 | 1625 | bo->mClass = |
michael@0 | 1626 | (aMode == MediaCacheStream::MODE_METADATA || bo->mClass == METADATA_BLOCK) |
michael@0 | 1627 | ? METADATA_BLOCK : PLAYED_BLOCK; |
michael@0 | 1628 | // Since this is just being used now, it can definitely be at the front |
michael@0 | 1629 | // of mMetadataBlocks or mPlayedBlocks |
michael@0 | 1630 | GetListForBlock(bo)->AddFirstBlock(aBlockIndex); |
michael@0 | 1631 | bo->mLastUseTime = aNow; |
michael@0 | 1632 | Verify(); |
michael@0 | 1633 | } |
michael@0 | 1634 | |
michael@0 | 1635 | void |
michael@0 | 1636 | MediaCache::NoteSeek(MediaCacheStream* aStream, int64_t aOldOffset) |
michael@0 | 1637 | { |
michael@0 | 1638 | mReentrantMonitor.AssertCurrentThreadIn(); |
michael@0 | 1639 | |
michael@0 | 1640 | if (aOldOffset < aStream->mStreamOffset) { |
michael@0 | 1641 | // We seeked forward. Convert blocks from readahead to played. |
michael@0 | 1642 | // Any readahead block that intersects the seeked-over range must |
michael@0 | 1643 | // be converted. |
michael@0 | 1644 | int32_t blockIndex = aOldOffset/BLOCK_SIZE; |
michael@0 | 1645 | int32_t endIndex = |
michael@0 | 1646 | std::min<int64_t>((aStream->mStreamOffset + BLOCK_SIZE - 1)/BLOCK_SIZE, |
michael@0 | 1647 | aStream->mBlocks.Length()); |
michael@0 | 1648 | TimeStamp now = TimeStamp::Now(); |
michael@0 | 1649 | while (blockIndex < endIndex) { |
michael@0 | 1650 | int32_t cacheBlockIndex = aStream->mBlocks[blockIndex]; |
michael@0 | 1651 | if (cacheBlockIndex >= 0) { |
michael@0 | 1652 | // Marking the block used may not be exactly what we want but |
michael@0 | 1653 | // it's simple |
michael@0 | 1654 | NoteBlockUsage(aStream, cacheBlockIndex, MediaCacheStream::MODE_PLAYBACK, |
michael@0 | 1655 | now); |
michael@0 | 1656 | } |
michael@0 | 1657 | ++blockIndex; |
michael@0 | 1658 | } |
michael@0 | 1659 | } else { |
michael@0 | 1660 | // We seeked backward. Convert from played to readahead. |
michael@0 | 1661 | // Any played block that is entirely after the start of the seeked-over |
michael@0 | 1662 | // range must be converted. |
michael@0 | 1663 | int32_t blockIndex = |
michael@0 | 1664 | (aStream->mStreamOffset + BLOCK_SIZE - 1)/BLOCK_SIZE; |
michael@0 | 1665 | int32_t endIndex = |
michael@0 | 1666 | std::min<int64_t>((aOldOffset + BLOCK_SIZE - 1)/BLOCK_SIZE, |
michael@0 | 1667 | aStream->mBlocks.Length()); |
michael@0 | 1668 | while (blockIndex < endIndex) { |
michael@0 | 1669 | int32_t cacheBlockIndex = aStream->mBlocks[endIndex - 1]; |
michael@0 | 1670 | if (cacheBlockIndex >= 0) { |
michael@0 | 1671 | BlockOwner* bo = GetBlockOwner(cacheBlockIndex, aStream); |
michael@0 | 1672 | NS_ASSERTION(bo, "Stream doesn't own its blocks?"); |
michael@0 | 1673 | if (bo->mClass == PLAYED_BLOCK) { |
michael@0 | 1674 | aStream->mPlayedBlocks.RemoveBlock(cacheBlockIndex); |
michael@0 | 1675 | bo->mClass = READAHEAD_BLOCK; |
michael@0 | 1676 | // Adding this as the first block is sure to be OK since |
michael@0 | 1677 | // this must currently be the earliest readahead block |
michael@0 | 1678 | // (that's why we're proceeding backwards from the end of |
michael@0 | 1679 | // the seeked range to the start) |
michael@0 | 1680 | aStream->mReadaheadBlocks.AddFirstBlock(cacheBlockIndex); |
michael@0 | 1681 | Verify(); |
michael@0 | 1682 | } |
michael@0 | 1683 | } |
michael@0 | 1684 | --endIndex; |
michael@0 | 1685 | } |
michael@0 | 1686 | } |
michael@0 | 1687 | } |
michael@0 | 1688 | |
michael@0 | 1689 | void |
michael@0 | 1690 | MediaCacheStream::NotifyDataLength(int64_t aLength) |
michael@0 | 1691 | { |
michael@0 | 1692 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1693 | |
michael@0 | 1694 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1695 | mStreamLength = aLength; |
michael@0 | 1696 | } |
michael@0 | 1697 | |
michael@0 | 1698 | void |
michael@0 | 1699 | MediaCacheStream::NotifyDataStarted(int64_t aOffset) |
michael@0 | 1700 | { |
michael@0 | 1701 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1702 | |
michael@0 | 1703 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1704 | NS_WARN_IF_FALSE(aOffset == mChannelOffset, |
michael@0 | 1705 | "Server is giving us unexpected offset"); |
michael@0 | 1706 | MOZ_ASSERT(aOffset >= 0); |
michael@0 | 1707 | mChannelOffset = aOffset; |
michael@0 | 1708 | if (mStreamLength >= 0) { |
michael@0 | 1709 | // If we started reading at a certain offset, then for sure |
michael@0 | 1710 | // the stream is at least that long. |
michael@0 | 1711 | mStreamLength = std::max(mStreamLength, mChannelOffset); |
michael@0 | 1712 | } |
michael@0 | 1713 | } |
michael@0 | 1714 | |
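michael@0 |      | // Combine aPrincipal into this stream's principal. The return value tells |
michael@0 |      | // callers (see NotifyDataReceived) whether the stored principal changed and |
michael@0 |      | // CacheClientNotifyPrincipalChanged should be fired. |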
michael@0 | 1715 | bool |
michael@0 | 1716 | MediaCacheStream::UpdatePrincipal(nsIPrincipal* aPrincipal) |
michael@0 | 1717 | { |
michael@0 | 1718 | return nsContentUtils::CombineResourcePrincipals(&mPrincipal, aPrincipal); |
michael@0 | 1719 | } |
michael@0 | 1720 | |
michael@0 | 1721 | void |
michael@0 | 1722 | MediaCacheStream::NotifyDataReceived(int64_t aSize, const char* aData, |
michael@0 | 1723 | nsIPrincipal* aPrincipal) |
michael@0 | 1724 | { |
michael@0 | 1725 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1726 | |
michael@0 | 1727 | // Update principals before putting the data in the cache. This is important, |
michael@0 | 1728 | // we want to make sure all principals are updated before any consumer |
michael@0 | 1729 | // can see the new data. |
michael@0 | 1730 | // We do this without holding the cache monitor, in case the client wants |
michael@0 | 1731 | // to do something that takes a lock. |
michael@0 | 1732 | { |
michael@0 | 1733 | MediaCache::ResourceStreamIterator iter(mResourceID); |
michael@0 | 1734 | while (MediaCacheStream* stream = iter.Next()) { |
michael@0 | 1735 | if (stream->UpdatePrincipal(aPrincipal)) { |
michael@0 | 1736 | stream->mClient->CacheClientNotifyPrincipalChanged(); |
michael@0 | 1737 | } |
michael@0 | 1738 | } |
michael@0 | 1739 | } |
michael@0 | 1740 | |
michael@0 | 1741 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1742 | int64_t size = aSize; |
michael@0 | 1743 | const char* data = aData; |
michael@0 | 1744 | |
michael@0 | 1745 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p DataReceived at %lld count=%lld", |
michael@0 | 1746 | this, (long long)mChannelOffset, (long long)aSize)); |
michael@0 | 1747 | |
michael@0 | 1748 | // We process the data one block (or part of a block) at a time |
michael@0 | 1749 | while (size > 0) { |
michael@0 | 1750 | uint32_t blockIndex = mChannelOffset/BLOCK_SIZE; |
michael@0 | 1751 | int32_t blockOffset = int32_t(mChannelOffset - blockIndex*BLOCK_SIZE); |
michael@0 | 1752 | int32_t chunkSize = std::min<int64_t>(BLOCK_SIZE - blockOffset, size); |
michael@0 | 1753 | |
michael@0 | 1754 | // This gets set to something non-null if we have a whole block |
michael@0 | 1755 | // of data to write to the cache |
michael@0 | 1756 | const char* blockDataToStore = nullptr; |
michael@0 | 1757 | ReadMode mode = MODE_PLAYBACK; |
michael@0 | 1758 | if (blockOffset == 0 && chunkSize == BLOCK_SIZE) { |
michael@0 | 1759 | // We received a whole block, so avoid a useless copy through |
michael@0 | 1760 | // mPartialBlockBuffer |
michael@0 | 1761 | blockDataToStore = data; |
michael@0 | 1762 | } else { |
michael@0 | 1763 | if (blockOffset == 0) { |
michael@0 | 1764 | // We've just started filling this buffer so now is a good time |
michael@0 | 1765 | // to clear this flag. |
michael@0 | 1766 | mMetadataInPartialBlockBuffer = false; |
michael@0 | 1767 | } |
michael@0 | 1768 | memcpy(reinterpret_cast<char*>(mPartialBlockBuffer.get()) + blockOffset, |
michael@0 | 1769 | data, chunkSize); |
michael@0 | 1770 | |
michael@0 | 1771 | if (blockOffset + chunkSize == BLOCK_SIZE) { |
michael@0 | 1772 | // We completed a block, so let's write it out. |
michael@0 | 1773 | blockDataToStore = reinterpret_cast<char*>(mPartialBlockBuffer.get()); |
michael@0 | 1774 | if (mMetadataInPartialBlockBuffer) { |
michael@0 | 1775 | mode = MODE_METADATA; |
michael@0 | 1776 | } |
michael@0 | 1777 | } |
michael@0 | 1778 | } |
michael@0 | 1779 | |
michael@0 | 1780 | if (blockDataToStore) { |
michael@0 | 1781 | gMediaCache->AllocateAndWriteBlock(this, blockDataToStore, mode); |
michael@0 | 1782 | } |
michael@0 | 1783 | |
michael@0 | 1784 | mChannelOffset += chunkSize; |
michael@0 | 1785 | size -= chunkSize; |
michael@0 | 1786 | data += chunkSize; |
michael@0 | 1787 | } |
michael@0 | 1788 | |
michael@0 | 1789 | MediaCache::ResourceStreamIterator iter(mResourceID); |
michael@0 | 1790 | while (MediaCacheStream* stream = iter.Next()) { |
michael@0 | 1791 | if (stream->mStreamLength >= 0) { |
michael@0 | 1792 | // The stream is at least as long as what we've read |
michael@0 | 1793 | stream->mStreamLength = std::max(stream->mStreamLength, mChannelOffset); |
michael@0 | 1794 | } |
michael@0 | 1795 | stream->mClient->CacheClientNotifyDataReceived(); |
michael@0 | 1796 | } |
michael@0 | 1797 | |
michael@0 | 1798 | // Notify in case there's a waiting reader |
michael@0 | 1799 | // XXX it would be fairly easy to optimize things a lot more to |
michael@0 | 1800 | // avoid waking up reader threads unnecessarily |
michael@0 | 1801 | mon.NotifyAll(); |
michael@0 | 1802 | } |
michael@0 | 1803 | |
michael@0 | 1804 | void |
michael@0 | 1805 | MediaCacheStream::FlushPartialBlockInternal(bool aNotifyAll) |
michael@0 | 1806 | { |
michael@0 | 1807 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1808 | |
michael@0 | 1809 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1810 | |
michael@0 | 1811 | int32_t blockOffset = int32_t(mChannelOffset%BLOCK_SIZE); |
michael@0 | 1812 | if (blockOffset > 0) { |
michael@0 | 1813 | CACHE_LOG(PR_LOG_DEBUG, |
michael@0 | 1814 | ("Stream %p writing partial block: [%d] bytes; " |
michael@0 | 1815 | "mStreamOffset [%lld] mChannelOffset[%lld] mStreamLength [%lld] " |
michael@0 | 1816 | "notifying: [%s]", |
michael@0 | 1817 | this, blockOffset, mStreamOffset, mChannelOffset, mStreamLength, |
michael@0 | 1818 | aNotifyAll ? "yes" : "no")); |
michael@0 | 1819 | |
michael@0 | 1820 | // Write back the partial block |
michael@0 | 1821 | memset(reinterpret_cast<char*>(mPartialBlockBuffer.get()) + blockOffset, 0, |
michael@0 | 1822 | BLOCK_SIZE - blockOffset); |
michael@0 | 1823 | gMediaCache->AllocateAndWriteBlock(this, mPartialBlockBuffer, |
michael@0 | 1824 | mMetadataInPartialBlockBuffer ? MODE_METADATA : MODE_PLAYBACK); |
michael@0 | 1825 | if (aNotifyAll) { |
michael@0 | 1826 | // Wake up readers who may be waiting for this data |
michael@0 | 1827 | mon.NotifyAll(); |
michael@0 | 1828 | } |
michael@0 | 1829 | } |
michael@0 | 1830 | } |
michael@0 | 1831 | |
michael@0 | 1832 | void |
michael@0 | 1833 | MediaCacheStream::FlushPartialBlock() |
michael@0 | 1834 | { |
michael@0 | 1835 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1836 | |
michael@0 | 1837 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1838 | |
michael@0 | 1839 | // Write the current partial block to memory. |
michael@0 | 1840 | // Note: This writes a full block, so if data is not at the end of the |
michael@0 | 1841 | // stream, the decoder must subsequently choose correct start and end offsets |
michael@0 | 1842 | // for reading/seeking. |
michael@0 | 1843 | FlushPartialBlockInternal(false); |
michael@0 | 1844 | |
michael@0 | 1845 | gMediaCache->QueueUpdate(); |
michael@0 | 1846 | } |
michael@0 | 1847 | |
michael@0 | 1848 | void |
michael@0 | 1849 | MediaCacheStream::NotifyDataEnded(nsresult aStatus) |
michael@0 | 1850 | { |
michael@0 | 1851 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1852 | |
michael@0 | 1853 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1854 | |
michael@0 | 1855 | if (NS_FAILED(aStatus)) { |
michael@0 | 1856 | // Disconnect from other streams sharing our resource, since they |
michael@0 | 1857 | // should continue trying to load. Our load might have been deliberately |
michael@0 | 1858 | // canceled and that shouldn't affect other streams. |
michael@0 | 1859 | mResourceID = gMediaCache->AllocateResourceID(); |
michael@0 | 1860 | } |
michael@0 | 1861 | |
michael@0 | 1862 | FlushPartialBlockInternal(true); |
michael@0 | 1863 | |
michael@0 | 1864 | if (!mDidNotifyDataEnded) { |
michael@0 | 1865 | MediaCache::ResourceStreamIterator iter(mResourceID); |
michael@0 | 1866 | while (MediaCacheStream* stream = iter.Next()) { |
michael@0 | 1867 | if (NS_SUCCEEDED(aStatus)) { |
michael@0 | 1868 | // We read the whole stream, so remember the true length |
michael@0 | 1869 | stream->mStreamLength = mChannelOffset; |
michael@0 | 1870 | } |
michael@0 | 1871 | NS_ASSERTION(!stream->mDidNotifyDataEnded, "Stream already ended!"); |
michael@0 | 1872 | stream->mDidNotifyDataEnded = true; |
michael@0 | 1873 | stream->mNotifyDataEndedStatus = aStatus; |
michael@0 | 1874 | stream->mClient->CacheClientNotifyDataEnded(aStatus); |
michael@0 | 1875 | } |
michael@0 | 1876 | } |
michael@0 | 1877 | |
michael@0 | 1878 | mChannelEnded = true; |
michael@0 | 1879 | gMediaCache->QueueUpdate(); |
michael@0 | 1880 | } |
michael@0 | 1881 | |
michael@0 | 1882 | MediaCacheStream::~MediaCacheStream() |
michael@0 | 1883 | { |
michael@0 | 1884 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1885 | NS_ASSERTION(!mPinCount, "Unbalanced Pin"); |
michael@0 | 1886 | |
michael@0 | 1887 | if (gMediaCache) { |
michael@0 | 1888 | NS_ASSERTION(mClosed, "Stream was not closed"); |
michael@0 | 1889 | gMediaCache->ReleaseStream(this); |
michael@0 | 1890 | MediaCache::MaybeShutdown(); |
michael@0 | 1891 | } |
michael@0 | 1892 | } |
michael@0 | 1893 | |
michael@0 | 1894 | void |
michael@0 | 1895 | MediaCacheStream::SetTransportSeekable(bool aIsTransportSeekable) |
michael@0 | 1896 | { |
michael@0 | 1897 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1898 | NS_ASSERTION(mIsTransportSeekable || aIsTransportSeekable || |
michael@0 | 1899 | mChannelOffset == 0, "channel offset must be zero when we become non-seekable"); |
michael@0 | 1900 | mIsTransportSeekable = aIsTransportSeekable; |
michael@0 | 1901 | // Queue an Update since we may change our strategy for dealing |
michael@0 | 1902 | // with this stream |
michael@0 | 1903 | gMediaCache->QueueUpdate(); |
michael@0 | 1904 | } |
michael@0 | 1905 | |
michael@0 | 1906 | bool |
michael@0 | 1907 | MediaCacheStream::IsTransportSeekable() |
michael@0 | 1908 | { |
michael@0 | 1909 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1910 | return mIsTransportSeekable; |
michael@0 | 1911 | } |
michael@0 | 1912 | |
michael@0 | 1913 | bool |
michael@0 | 1914 | MediaCacheStream::AreAllStreamsForResourceSuspended() |
michael@0 | 1915 | { |
michael@0 | 1916 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1917 | MediaCache::ResourceStreamIterator iter(mResourceID); |
michael@0 | 1918 | // Look for a stream that's able to read the data we need |
michael@0 | 1919 | int64_t dataOffset = -1; |
michael@0 | 1920 | while (MediaCacheStream* stream = iter.Next()) { |
michael@0 | 1921 | if (stream->mCacheSuspended || stream->mChannelEnded || stream->mClosed) { |
michael@0 | 1922 | continue; |
michael@0 | 1923 | } |
michael@0 | 1924 | if (dataOffset < 0) { |
michael@0 | 1925 | dataOffset = GetCachedDataEndInternal(mStreamOffset); |
michael@0 | 1926 | } |
michael@0 | 1927 | // Ignore streams that are reading beyond the data we need |
michael@0 | 1928 | if (stream->mChannelOffset > dataOffset) { |
michael@0 | 1929 | continue; |
michael@0 | 1930 | } |
michael@0 | 1931 | return false; |
michael@0 | 1932 | } |
michael@0 | 1933 | |
michael@0 | 1934 | return true; |
michael@0 | 1935 | } |
michael@0 | 1936 | |
michael@0 | 1937 | void |
michael@0 | 1938 | MediaCacheStream::Close() |
michael@0 | 1939 | { |
michael@0 | 1940 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1941 | |
michael@0 | 1942 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1943 | CloseInternal(mon); |
michael@0 | 1944 | // Queue an Update since we may have created more free space. Don't do |
michael@0 | 1945 | // it from CloseInternal since that gets called by Update() itself |
michael@0 | 1946 | // sometimes, and we try not to queue updates from Update(). |
michael@0 | 1947 | gMediaCache->QueueUpdate(); |
michael@0 | 1948 | } |
michael@0 | 1949 | |
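michael@0 |      | // If this stream has not yet been processed by a cache Update(), run one |
michael@0 |      | // synchronously now. |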
michael@0 | 1950 | void |
michael@0 | 1951 | MediaCacheStream::EnsureCacheUpdate() |
michael@0 | 1952 | { |
michael@0 | 1953 | if (mHasHadUpdate) |
michael@0 | 1954 | return; |
michael@0 | 1955 | gMediaCache->Update(); |
michael@0 | 1956 | } |
michael@0 | 1957 | |
michael@0 | 1958 | void |
michael@0 | 1959 | MediaCacheStream::CloseInternal(ReentrantMonitorAutoEnter& aReentrantMonitor) |
michael@0 | 1960 | { |
michael@0 | 1961 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 1962 | |
michael@0 | 1963 | if (mClosed) |
michael@0 | 1964 | return; |
michael@0 | 1965 | mClosed = true; |
michael@0 | 1966 | gMediaCache->ReleaseStreamBlocks(this); |
michael@0 | 1967 | // Wake up any blocked readers |
michael@0 | 1968 | aReentrantMonitor.NotifyAll(); |
michael@0 | 1969 | } |
michael@0 | 1970 | |
michael@0 | 1971 | void |
michael@0 | 1972 | MediaCacheStream::Pin() |
michael@0 | 1973 | { |
michael@0 | 1974 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1975 | ++mPinCount; |
michael@0 | 1976 | // Queue an Update since we may no longer want to read more into the |
michael@0 | 1977 | // cache, if this stream's blocks have become non-evictable |
michael@0 | 1978 | gMediaCache->QueueUpdate(); |
michael@0 | 1979 | } |
michael@0 | 1980 | |
michael@0 | 1981 | void |
michael@0 | 1982 | MediaCacheStream::Unpin() |
michael@0 | 1983 | { |
michael@0 | 1984 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1985 | NS_ASSERTION(mPinCount > 0, "Unbalanced Unpin"); |
michael@0 | 1986 | --mPinCount; |
michael@0 | 1987 | // Queue an Update since we may be able to read more into the |
michael@0 | 1988 | // cache, if this stream's blocks have become evictable |
michael@0 | 1989 | gMediaCache->QueueUpdate(); |
michael@0 | 1990 | } |
michael@0 | 1991 | |
michael@0 | 1992 | int64_t |
michael@0 | 1993 | MediaCacheStream::GetLength() |
michael@0 | 1994 | { |
michael@0 | 1995 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 1996 | return mStreamLength; |
michael@0 | 1997 | } |
michael@0 | 1998 | |
michael@0 | 1999 | int64_t |
michael@0 | 2000 | MediaCacheStream::GetNextCachedData(int64_t aOffset) |
michael@0 | 2001 | { |
michael@0 | 2002 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2003 | return GetNextCachedDataInternal(aOffset); |
michael@0 | 2004 | } |
michael@0 | 2005 | |
michael@0 | 2006 | int64_t |
michael@0 | 2007 | MediaCacheStream::GetCachedDataEnd(int64_t aOffset) |
michael@0 | 2008 | { |
michael@0 | 2009 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2010 | return GetCachedDataEndInternal(aOffset); |
michael@0 | 2011 | } |
michael@0 | 2012 | |
michael@0 | 2013 | bool |
michael@0 | 2014 | MediaCacheStream::IsDataCachedToEndOfStream(int64_t aOffset) |
michael@0 | 2015 | { |
michael@0 | 2016 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2017 | if (mStreamLength < 0) |
michael@0 | 2018 | return false; |
michael@0 | 2019 | return GetCachedDataEndInternal(aOffset) >= mStreamLength; |
michael@0 | 2020 | } |
michael@0 | 2021 | |
michael@0 | 2022 | int64_t |
michael@0 | 2023 | MediaCacheStream::GetCachedDataEndInternal(int64_t aOffset) |
michael@0 | 2024 | { |
michael@0 | 2025 | gMediaCache->GetReentrantMonitor().AssertCurrentThreadIn(); |
michael@0 | 2026 | uint32_t startBlockIndex = aOffset/BLOCK_SIZE; |
michael@0 | 2027 | uint32_t blockIndex = startBlockIndex; |
michael@0 | 2028 | while (blockIndex < mBlocks.Length() && mBlocks[blockIndex] != -1) { |
michael@0 | 2029 | ++blockIndex; |
michael@0 | 2030 | } |
michael@0 | 2031 | int64_t result = blockIndex*BLOCK_SIZE; |
michael@0 | 2032 | if (blockIndex == mChannelOffset/BLOCK_SIZE) { |
michael@0 | 2033 | // The block containing mChannelOffset may be partially read but not |
michael@0 | 2034 | // yet committed to the main cache |
michael@0 | 2035 | result = mChannelOffset; |
michael@0 | 2036 | } |
michael@0 | 2037 | if (mStreamLength >= 0) { |
michael@0 | 2038 | // The last block in the cache may only be partially valid, so limit |
michael@0 | 2039 | // the cached range to the stream length |
michael@0 | 2040 | result = std::min(result, mStreamLength); |
michael@0 | 2041 | } |
michael@0 | 2042 | return std::max(result, aOffset); |
michael@0 | 2043 | } |
michael@0 | 2044 | |
michael@0 | 2045 | int64_t |
michael@0 | 2046 | MediaCacheStream::GetNextCachedDataInternal(int64_t aOffset) |
michael@0 | 2047 | { |
michael@0 | 2048 | gMediaCache->GetReentrantMonitor().AssertCurrentThreadIn(); |
michael@0 | 2049 | if (aOffset == mStreamLength) |
michael@0 | 2050 | return -1; |
michael@0 | 2051 | |
michael@0 | 2052 | uint32_t startBlockIndex = aOffset/BLOCK_SIZE; |
michael@0 | 2053 | uint32_t channelBlockIndex = mChannelOffset/BLOCK_SIZE; |
michael@0 | 2054 | |
michael@0 | 2055 | if (startBlockIndex == channelBlockIndex && |
michael@0 | 2056 | aOffset < mChannelOffset) { |
michael@0 | 2057 | // The block containing mChannelOffset is partially read, but not |
michael@0 | 2058 | // yet committed to the main cache. aOffset lies in the partially |
michael@0 | 2059 | // read portion, thus it is effectively cached. |
michael@0 | 2060 | return aOffset; |
michael@0 | 2061 | } |
michael@0 | 2062 | |
michael@0 | 2063 | if (startBlockIndex >= mBlocks.Length()) |
michael@0 | 2064 | return -1; |
michael@0 | 2065 | |
michael@0 | 2066 | // Is the current block cached? |
michael@0 | 2067 | if (mBlocks[startBlockIndex] != -1) |
michael@0 | 2068 | return aOffset; |
michael@0 | 2069 | |
michael@0 | 2070 | // Count the number of uncached blocks |
michael@0 | 2071 | bool hasPartialBlock = (mChannelOffset % BLOCK_SIZE) != 0; |
michael@0 | 2072 | uint32_t blockIndex = startBlockIndex + 1; |
michael@0 | 2073 | while (true) { |
michael@0 | 2074 | if ((hasPartialBlock && blockIndex == channelBlockIndex) || |
michael@0 | 2075 | (blockIndex < mBlocks.Length() && mBlocks[blockIndex] != -1)) { |
michael@0 | 2076 | // We are at the incoming channel block, which has data in it, |
michael@0 | 2077 | // or at a cached block. Return the offset of the block start. |
michael@0 | 2078 | return blockIndex * BLOCK_SIZE; |
michael@0 | 2079 | } |
michael@0 | 2080 | |
michael@0 | 2081 | // No more cached blocks? |
michael@0 | 2082 | if (blockIndex >= mBlocks.Length()) |
michael@0 | 2083 | return -1; |
michael@0 | 2084 | |
michael@0 | 2085 | ++blockIndex; |
michael@0 | 2086 | } |
michael@0 | 2087 | |
michael@0 | 2088 | NS_NOTREACHED("Should return in loop"); |
michael@0 | 2089 | return -1; |
michael@0 | 2090 | } |
michael@0 | 2091 | |
michael@0 | 2092 | void |
michael@0 | 2093 | MediaCacheStream::SetReadMode(ReadMode aMode) |
michael@0 | 2094 | { |
michael@0 | 2095 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2096 | if (aMode == mCurrentMode) |
michael@0 | 2097 | return; |
michael@0 | 2098 | mCurrentMode = aMode; |
michael@0 | 2099 | gMediaCache->QueueUpdate(); |
michael@0 | 2100 | } |
michael@0 | 2101 | |
michael@0 | 2102 | void |
michael@0 | 2103 | MediaCacheStream::SetPlaybackRate(uint32_t aBytesPerSecond) |
michael@0 | 2104 | { |
michael@0 | 2105 | NS_ASSERTION(aBytesPerSecond > 0, "Zero playback rate not allowed"); |
michael@0 | 2106 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2107 | if (aBytesPerSecond == mPlaybackBytesPerSecond) |
michael@0 | 2108 | return; |
michael@0 | 2109 | mPlaybackBytesPerSecond = aBytesPerSecond; |
michael@0 | 2110 | gMediaCache->QueueUpdate(); |
michael@0 | 2111 | } |
michael@0 | 2112 | |
michael@0 | 2113 | nsresult |
michael@0 | 2114 | MediaCacheStream::Seek(int32_t aWhence, int64_t aOffset) |
michael@0 | 2115 | { |
michael@0 | 2116 | NS_ASSERTION(!NS_IsMainThread(), "Don't call on main thread"); |
michael@0 | 2117 | |
michael@0 | 2118 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2119 | if (mClosed) |
michael@0 | 2120 | return NS_ERROR_FAILURE; |
michael@0 | 2121 | |
michael@0 | 2122 | int64_t oldOffset = mStreamOffset; |
michael@0 | 2123 | int64_t newOffset = mStreamOffset; |
michael@0 | 2124 | switch (aWhence) { |
michael@0 | 2125 | case PR_SEEK_END: |
michael@0 | 2126 | if (mStreamLength < 0) |
michael@0 | 2127 | return NS_ERROR_FAILURE; |
michael@0 | 2128 | newOffset = mStreamLength + aOffset; |
michael@0 | 2129 | break; |
michael@0 | 2130 | case PR_SEEK_CUR: |
michael@0 | 2131 | newOffset += aOffset; |
michael@0 | 2132 | break; |
michael@0 | 2133 | case PR_SEEK_SET: |
michael@0 | 2134 | newOffset = aOffset; |
michael@0 | 2135 | break; |
michael@0 | 2136 | default: |
michael@0 | 2137 | NS_ERROR("Unknown whence"); |
michael@0 | 2138 | return NS_ERROR_FAILURE; |
michael@0 | 2139 | } |
michael@0 | 2140 | |
michael@0 | 2141 | if (newOffset < 0) |
michael@0 | 2142 | return NS_ERROR_FAILURE; |
michael@0 | 2143 | mStreamOffset = newOffset; |
michael@0 | 2144 | |
michael@0 | 2145 | CACHE_LOG(PR_LOG_DEBUG, ("Stream %p Seek to %lld", this, (long long)mStreamOffset)); |
michael@0 | 2146 | gMediaCache->NoteSeek(this, oldOffset); |
michael@0 | 2147 | |
michael@0 | 2148 | gMediaCache->QueueUpdate(); |
michael@0 | 2149 | return NS_OK; |
michael@0 | 2150 | } |
michael@0 | 2151 | |
michael@0 | 2152 | int64_t |
michael@0 | 2153 | MediaCacheStream::Tell() |
michael@0 | 2154 | { |
michael@0 | 2155 | NS_ASSERTION(!NS_IsMainThread(), "Don't call on main thread"); |
michael@0 | 2156 | |
michael@0 | 2157 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2158 | return mStreamOffset; |
michael@0 | 2159 | } |
michael@0 | 2160 | |
michael@0 | 2161 | nsresult |
michael@0 | 2162 | MediaCacheStream::Read(char* aBuffer, uint32_t aCount, uint32_t* aBytes) |
michael@0 | 2163 | { |
michael@0 | 2164 | NS_ASSERTION(!NS_IsMainThread(), "Don't call on main thread"); |
michael@0 | 2165 | |
michael@0 | 2166 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2167 | if (mClosed) |
michael@0 | 2168 | return NS_ERROR_FAILURE; |
michael@0 | 2169 | |
michael@0 | 2170 | uint32_t count = 0; |
michael@0 | 2171 | // Read one block (or part of a block) at a time |
michael@0 | 2172 | while (count < aCount) { |
michael@0 | 2173 | uint32_t streamBlock = uint32_t(mStreamOffset/BLOCK_SIZE); |
michael@0 | 2174 | uint32_t offsetInStreamBlock = |
michael@0 | 2175 | uint32_t(mStreamOffset - streamBlock*BLOCK_SIZE); |
michael@0 | 2176 | int64_t size = std::min(aCount - count, BLOCK_SIZE - offsetInStreamBlock); |
michael@0 | 2177 | |
michael@0 | 2178 | if (mStreamLength >= 0) { |
michael@0 | 2179 | // Don't try to read beyond the end of the stream |
michael@0 | 2180 | int64_t bytesRemaining = mStreamLength - mStreamOffset; |
michael@0 | 2181 | if (bytesRemaining <= 0) { |
michael@0 | 2182 | // Get out of here and return NS_OK |
michael@0 | 2183 | break; |
michael@0 | 2184 | } |
michael@0 | 2185 | size = std::min(size, bytesRemaining); |
michael@0 | 2186 | // Clamp size until 64-bit file size issues are fixed. |
michael@0 | 2187 | size = std::min(size, int64_t(INT32_MAX)); |
michael@0 | 2188 | } |
michael@0 | 2189 | |
michael@0 | 2190 | int32_t cacheBlock = streamBlock < mBlocks.Length() ? mBlocks[streamBlock] : -1; |
michael@0 | 2191 | if (cacheBlock < 0) { |
michael@0 | 2192 | // We don't have a complete cached block here. |
michael@0 | 2193 | |
michael@0 | 2194 | if (count > 0) { |
michael@0 | 2195 | // Some data has been read, so return what we've got instead of |
michael@0 | 2196 | // blocking or trying to find a stream with a partial block. |
michael@0 | 2197 | break; |
michael@0 | 2198 | } |
michael@0 | 2199 | |
michael@0 | 2200 | // See if the data is available in the partial cache block of any |
michael@0 | 2201 | // stream reading this resource. We need to do this in case there is |
michael@0 | 2202 | // another stream with this resource that has all the data to the end of |
michael@0 | 2203 | // the stream but the data doesn't end on a block boundary. |
michael@0 | 2204 | MediaCacheStream* streamWithPartialBlock = nullptr; |
michael@0 | 2205 | MediaCache::ResourceStreamIterator iter(mResourceID); |
michael@0 | 2206 | while (MediaCacheStream* stream = iter.Next()) { |
michael@0 | 2207 | if (uint32_t(stream->mChannelOffset/BLOCK_SIZE) == streamBlock && |
michael@0 | 2208 | mStreamOffset < stream->mChannelOffset) { |
michael@0 | 2209 | streamWithPartialBlock = stream; |
michael@0 | 2210 | break; |
michael@0 | 2211 | } |
michael@0 | 2212 | } |
michael@0 | 2213 | if (streamWithPartialBlock) { |
michael@0 | 2214 | // We can just use the data in mPartialBlockBuffer. In fact we should |
michael@0 | 2215 | // use it rather than waiting for the block to fill and land in |
michael@0 | 2216 | // the cache. |
michael@0 | 2217 | int64_t bytes = std::min<int64_t>(size, streamWithPartialBlock->mChannelOffset - mStreamOffset); |
michael@0 | 2218 | // Clamp bytes until 64-bit file size issues are fixed. |
michael@0 | 2219 | bytes = std::min(bytes, int64_t(INT32_MAX)); |
michael@0 | 2220 | NS_ABORT_IF_FALSE(bytes >= 0 && bytes <= aCount, "Bytes out of range."); |
michael@0 | 2221 | memcpy(aBuffer, |
michael@0 | 2222 | reinterpret_cast<char*>(streamWithPartialBlock->mPartialBlockBuffer.get()) + offsetInStreamBlock, bytes); |
michael@0 | 2223 | if (mCurrentMode == MODE_METADATA) { |
michael@0 | 2224 | streamWithPartialBlock->mMetadataInPartialBlockBuffer = true; |
michael@0 | 2225 | } |
michael@0 | 2226 | mStreamOffset += bytes; |
michael@0 | 2227 | count = bytes; |
michael@0 | 2228 | break; |
michael@0 | 2229 | } |
michael@0 | 2230 | |
michael@0 | 2231 | // No data has been read yet, so block |
michael@0 | 2232 | mon.Wait(); |
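// Wait() releases the cache monitor while blocked and reacquires it before
// returning; other parts of MediaCache notify this monitor (for example when
// new data arrives or the stream is closed), so the read sleeps here instead
// of polling for the missing block.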
michael@0 | 2233 | if (mClosed) { |
michael@0 | 2234 | // We can only reach this Wait() when nothing has been copied yet
michael@0 | 2235 | // (count is 0 here), so there is no partial result worth returning.
michael@0 | 2236 | return NS_ERROR_FAILURE; |
michael@0 | 2237 | } |
michael@0 | 2238 | continue; |
michael@0 | 2239 | } |
michael@0 | 2240 | |
michael@0 | 2241 | gMediaCache->NoteBlockUsage(this, cacheBlock, mCurrentMode, TimeStamp::Now()); |
michael@0 | 2242 | |
michael@0 | 2243 | int64_t offset = cacheBlock*BLOCK_SIZE + offsetInStreamBlock; |
michael@0 | 2244 | int32_t bytes; |
michael@0 | 2245 | NS_ABORT_IF_FALSE(size >= 0 && size <= INT32_MAX, "Size out of range."); |
michael@0 | 2246 | nsresult rv = gMediaCache->ReadCacheFile(offset, aBuffer + count, int32_t(size), &bytes); |
michael@0 | 2247 | if (NS_FAILED(rv)) { |
michael@0 | 2248 | if (count == 0) |
michael@0 | 2249 | return rv; |
michael@0 | 2250 | // If we did successfully read some data, may as well return it |
michael@0 | 2251 | break; |
michael@0 | 2252 | } |
michael@0 | 2253 | mStreamOffset += bytes; |
michael@0 | 2254 | count += bytes; |
michael@0 | 2255 | } |
michael@0 | 2256 | |
michael@0 | 2257 | if (count > 0) { |
michael@0 | 2258 | // Some data was read, so queue an update since block priorities may |
michael@0 | 2259 | // have changed |
michael@0 | 2260 | gMediaCache->QueueUpdate(); |
michael@0 | 2261 | } |
michael@0 | 2262 | CACHE_LOG(PR_LOG_DEBUG, |
michael@0 | 2263 | ("Stream %p Read at %lld count=%d", this, (long long)(mStreamOffset-count), count)); |
michael@0 | 2264 | *aBytes = count; |
michael@0 | 2265 | return NS_OK; |
michael@0 | 2266 | } |
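// Illustrative usage sketch (not part of the original source): a consumer
// off the main thread typically drains a stream through Read() in a loop,
// stopping on a failure or on a zero-byte read at end of stream. "aStream"
// is a hypothetical, already-initialized MediaCacheStream*.
//
//   char buffer[4096];
//   uint32_t bytesRead = 0;
//   do {
//     nsresult rv = aStream->Read(buffer, sizeof(buffer), &bytesRead);
//     if (NS_FAILED(rv)) {
//       break;  // the stream was closed or the cache file read failed
//     }
//     // Consume bytesRead bytes of buffer here; 0 means end of stream.
//   } while (bytesRead > 0);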
michael@0 | 2267 | |
michael@0 | 2268 | nsresult |
michael@0 | 2269 | MediaCacheStream::ReadAt(int64_t aOffset, char* aBuffer, |
michael@0 | 2270 | uint32_t aCount, uint32_t* aBytes) |
michael@0 | 2271 | { |
michael@0 | 2272 | NS_ASSERTION(!NS_IsMainThread(), "Don't call on main thread"); |
michael@0 | 2273 | |
michael@0 | 2274 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2275 | nsresult rv = Seek(nsISeekableStream::NS_SEEK_SET, aOffset); |
michael@0 | 2276 | if (NS_FAILED(rv)) return rv; |
michael@0 | 2277 | return Read(aBuffer, aCount, aBytes); |
michael@0 | 2278 | } |
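// Note: ReadAt() enters the cache monitor before delegating, and the monitor
// is reentrant, so the nested acquisitions inside Seek() and Read() are safe
// and the position set by Seek() is still in place when Read() begins.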
michael@0 | 2279 | |
michael@0 | 2280 | nsresult |
michael@0 | 2281 | MediaCacheStream::ReadFromCache(char* aBuffer, int64_t aOffset, int64_t aCount) |
michael@0 | 2282 | { |
michael@0 | 2283 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2284 | if (mClosed) |
michael@0 | 2285 | return NS_ERROR_FAILURE; |
michael@0 | 2286 | |
michael@0 | 2287 | // Read one block (or part of a block) at a time |
michael@0 | 2288 | uint32_t count = 0; |
michael@0 | 2289 | int64_t streamOffset = aOffset; |
michael@0 | 2290 | while (count < aCount) { |
michael@0 | 2291 | uint32_t streamBlock = uint32_t(streamOffset/BLOCK_SIZE); |
michael@0 | 2292 | uint32_t offsetInStreamBlock = |
michael@0 | 2293 | uint32_t(streamOffset - streamBlock*BLOCK_SIZE); |
michael@0 | 2294 | int64_t size = std::min<int64_t>(aCount - count, BLOCK_SIZE - offsetInStreamBlock); |
michael@0 | 2295 | |
michael@0 | 2296 | if (mStreamLength >= 0) { |
michael@0 | 2297 | // Don't try to read beyond the end of the stream |
michael@0 | 2298 | int64_t bytesRemaining = mStreamLength - streamOffset; |
michael@0 | 2299 | if (bytesRemaining <= 0) { |
michael@0 | 2300 | return NS_ERROR_FAILURE; |
michael@0 | 2301 | } |
michael@0 | 2302 | size = std::min(size, bytesRemaining); |
michael@0 | 2303 | // Clamp size until 64-bit file size issues are fixed. |
michael@0 | 2304 | size = std::min(size, int64_t(INT32_MAX)); |
michael@0 | 2305 | } |
michael@0 | 2306 | |
michael@0 | 2307 | int32_t bytes; |
michael@0 | 2308 | uint32_t channelBlock = uint32_t(mChannelOffset/BLOCK_SIZE); |
michael@0 | 2309 | int32_t cacheBlock = streamBlock < mBlocks.Length() ? mBlocks[streamBlock] : -1; |
michael@0 | 2310 | if (channelBlock == streamBlock && streamOffset < mChannelOffset) { |
michael@0 | 2311 | // We can just use the data in mPartialBlockBuffer. In fact we should |
michael@0 | 2312 | // use it rather than waiting for the block to fill and land in |
michael@0 | 2313 | // the cache. |
michael@0 | 2314 | // Clamp bytes until 64-bit file size issues are fixed. |
michael@0 | 2315 | int64_t toCopy = std::min<int64_t>(size, mChannelOffset - streamOffset); |
michael@0 | 2316 | bytes = std::min(toCopy, int64_t(INT32_MAX)); |
michael@0 | 2317 | NS_ABORT_IF_FALSE(bytes >= 0 && bytes <= toCopy, "Bytes out of range."); |
michael@0 | 2318 | memcpy(aBuffer + count, |
michael@0 | 2319 | reinterpret_cast<char*>(mPartialBlockBuffer.get()) + offsetInStreamBlock, bytes); |
michael@0 | 2320 | } else { |
michael@0 | 2321 | if (cacheBlock < 0) { |
michael@0 | 2322 | // We expect all blocks to be cached! Fail! |
michael@0 | 2323 | return NS_ERROR_FAILURE; |
michael@0 | 2324 | } |
michael@0 | 2325 | int64_t offset = cacheBlock*BLOCK_SIZE + offsetInStreamBlock; |
michael@0 | 2326 | NS_ABORT_IF_FALSE(size >= 0 && size <= INT32_MAX, "Size out of range."); |
michael@0 | 2327 | nsresult rv = gMediaCache->ReadCacheFile(offset, aBuffer + count, int32_t(size), &bytes); |
michael@0 | 2328 | if (NS_FAILED(rv)) { |
michael@0 | 2329 | return rv; |
michael@0 | 2330 | } |
michael@0 | 2331 | } |
michael@0 | 2332 | streamOffset += bytes; |
michael@0 | 2333 | count += bytes; |
michael@0 | 2334 | } |
michael@0 | 2335 | |
michael@0 | 2336 | return NS_OK; |
michael@0 | 2337 | } |
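// Illustrative usage sketch (not part of the original source): unlike Read(),
// ReadFromCache() never blocks and fails if any requested byte is missing
// from the cache, so callers check (and normally pin) the cached range first.
// "aStream" is a hypothetical, already-initialized MediaCacheStream*.
//
//   char header[1024];
//   if (aStream->GetCachedDataEnd(0) >= int64_t(sizeof(header))) {
//     // Bytes [0, 1024) are cached right now; real callers usually hold a
//     // Pin() across this so the blocks cannot be evicted in between.
//     if (NS_FAILED(aStream->ReadFromCache(header, 0, sizeof(header)))) {
//       // A block was evicted (or the stream closed) before the read.
//     }
//   }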
michael@0 | 2338 | |
michael@0 | 2339 | nsresult |
michael@0 | 2340 | MediaCacheStream::Init() |
michael@0 | 2341 | { |
michael@0 | 2342 | NS_ASSERTION(NS_IsMainThread(), "Only call on main thread"); |
michael@0 | 2343 | |
michael@0 | 2344 | if (mInitialized) |
michael@0 | 2345 | return NS_OK; |
michael@0 | 2346 | |
michael@0 | 2347 | InitMediaCache(); |
michael@0 | 2348 | if (!gMediaCache) |
michael@0 | 2349 | return NS_ERROR_FAILURE; |
michael@0 | 2350 | gMediaCache->OpenStream(this); |
michael@0 | 2351 | mInitialized = true; |
michael@0 | 2352 | return NS_OK; |
michael@0 | 2353 | } |
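// Note: Init() is idempotent. It lazily creates the process-wide cache via
// InitMediaCache() if needed, registers this stream with it through
// OpenStream(), and must run on the main thread before the stream is read.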
michael@0 | 2354 | |
michael@0 | 2355 | nsresult |
michael@0 | 2356 | MediaCacheStream::InitAsClone(MediaCacheStream* aOriginal) |
michael@0 | 2357 | { |
michael@0 | 2358 | if (!aOriginal->IsAvailableForSharing()) |
michael@0 | 2359 | return NS_ERROR_FAILURE; |
michael@0 | 2360 | |
michael@0 | 2361 | if (mInitialized) |
michael@0 | 2362 | return NS_OK; |
michael@0 | 2363 | |
michael@0 | 2364 | nsresult rv = Init(); |
michael@0 | 2365 | if (NS_FAILED(rv)) |
michael@0 | 2366 | return rv; |
michael@0 | 2367 | mResourceID = aOriginal->mResourceID; |
michael@0 | 2368 | |
michael@0 | 2369 | // Grab cache blocks from aOriginal as readahead blocks for our stream |
michael@0 | 2370 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2371 | |
michael@0 | 2372 | mPrincipal = aOriginal->mPrincipal; |
michael@0 | 2373 | mStreamLength = aOriginal->mStreamLength; |
michael@0 | 2374 | mIsTransportSeekable = aOriginal->mIsTransportSeekable; |
michael@0 | 2375 | |
michael@0 | 2376 | // Cloned streams start out suspended, since a clone has no channel open
michael@0 | 2377 | // yet.
michael@0 | 2378 | mCacheSuspended = true; |
michael@0 | 2379 | mChannelEnded = true; |
michael@0 | 2380 | |
michael@0 | 2381 | if (aOriginal->mDidNotifyDataEnded) { |
michael@0 | 2382 | mNotifyDataEndedStatus = aOriginal->mNotifyDataEndedStatus; |
michael@0 | 2383 | mDidNotifyDataEnded = true; |
michael@0 | 2384 | mClient->CacheClientNotifyDataEnded(mNotifyDataEndedStatus); |
michael@0 | 2385 | } |
michael@0 | 2386 | |
michael@0 | 2387 | for (uint32_t i = 0; i < aOriginal->mBlocks.Length(); ++i) { |
michael@0 | 2388 | int32_t cacheBlockIndex = aOriginal->mBlocks[i]; |
michael@0 | 2389 | if (cacheBlockIndex < 0) |
michael@0 | 2390 | continue; |
michael@0 | 2391 | |
michael@0 | 2392 | while (i >= mBlocks.Length()) { |
michael@0 | 2393 | mBlocks.AppendElement(-1); |
michael@0 | 2394 | } |
michael@0 | 2395 | // Every block is a readahead block for the clone because the clone's initial |
michael@0 | 2396 | // stream offset is zero |
michael@0 | 2397 | gMediaCache->AddBlockOwnerAsReadahead(cacheBlockIndex, this, i); |
michael@0 | 2398 | } |
michael@0 | 2399 | |
michael@0 | 2400 | return NS_OK; |
michael@0 | 2401 | } |
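// Illustrative usage sketch (not part of the original source): cloning lets a
// second consumer of the same resource start with everything the original
// stream has already cached; the blocks are shared (the clone is added as a
// readahead owner), not copied. "original" and "clone" are hypothetical
// pointers to MediaCacheStream objects for the same resource.
//
//   nsresult rv = clone->InitAsClone(original);
//   if (NS_SUCCEEDED(rv)) {
//     // The clone starts suspended with no channel open; its playback
//     // position is 0, so every inherited block is readahead for it.
//   }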
michael@0 | 2402 | |
michael@0 | 2403 | nsresult MediaCacheStream::GetCachedRanges(nsTArray<MediaByteRange>& aRanges) |
michael@0 | 2404 | { |
michael@0 | 2405 | // Take the monitor, so that the cached data ranges can't grow while we're |
michael@0 | 2406 | // trying to loop over them. |
michael@0 | 2407 | ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor()); |
michael@0 | 2408 | |
michael@0 | 2409 | // We must be pinned while running this, otherwise the cached data ranges may |
michael@0 | 2410 | // shrink while we're trying to loop over them. |
michael@0 | 2411 | NS_ASSERTION(mPinCount > 0, "Must be pinned"); |
michael@0 | 2412 | |
michael@0 | 2413 | int64_t startOffset = GetNextCachedData(0); |
michael@0 | 2414 | while (startOffset >= 0) { |
michael@0 | 2415 | int64_t endOffset = GetCachedDataEnd(startOffset); |
michael@0 | 2416 | NS_ASSERTION(startOffset < endOffset, "Buffered range must end after its start"); |
michael@0 | 2417 | // Bytes [startOffset..endOffset) are cached.
michael@0 | 2418 | aRanges.AppendElement(MediaByteRange(startOffset, endOffset)); |
michael@0 | 2419 | startOffset = GetNextCachedData(endOffset); |
michael@0 | 2420 | NS_ASSERTION(startOffset == -1 || startOffset > endOffset, |
michael@0 | 2421 | "Must have advanced to start of next range, or hit end of stream"); |
michael@0 | 2422 | } |
michael@0 | 2423 | return NS_OK; |
michael@0 | 2424 | } |
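// Illustrative usage sketch (not part of the original source): the caller must
// keep the stream pinned (the Pin()/Unpin() calls that maintain mPinCount) so
// the cached ranges cannot shrink while they are being enumerated. "aStream"
// is a hypothetical, already-initialized MediaCacheStream*.
//
//   nsTArray<MediaByteRange> ranges;
//   aStream->Pin();
//   nsresult rv = aStream->GetCachedRanges(ranges);
//   aStream->Unpin();
//   // On success, "ranges" lists each contiguous cached span in ascending
//   // order of offset.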
michael@0 | 2425 | |
michael@0 | 2426 | } // namespace mozilla |
michael@0 | 2427 |