Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory storage according to the privacy.thirdparty.isolate preference.
This solves Tor bug #9701 by complying with the disk-avoidance requirement documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
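As a rough sketch of the idea stated above (assuming a boolean reading of the pref; the helper name and the exact decision point are illustrative, not part of this patch), the disk-avoidance gate could look like:

    // Illustrative sketch only -- not taken from the listing below.
    // When disk avoidance is requested, media cache blocks should stay in
    // memory instead of being written to the anonymous temporary file that
    // backs FileBlockCache.
    #include "mozilla/Preferences.h"

    static bool ForceMediaCacheInMemory()
    {
      return mozilla::Preferences::GetBool("privacy.thirdparty.isolate", false);
    }

A check like this would then gate the FileBlockCache setup in MediaCache::Init() so that no cache file is created while the pref is in effect.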
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim:set ts=2 sw=2 sts=2 et cindent: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "mozilla/ReentrantMonitor.h"
9 #include "MediaCache.h"
10 #include "prio.h"
11 #include "nsContentUtils.h"
12 #include "nsThreadUtils.h"
13 #include "MediaResource.h"
14 #include "prlog.h"
15 #include "mozilla/Preferences.h"
16 #include "FileBlockCache.h"
17 #include "nsAnonymousTemporaryFile.h"
18 #include "nsIObserverService.h"
19 #include "nsISeekableStream.h"
20 #include "nsIPrincipal.h"
21 #include "mozilla/Attributes.h"
22 #include "mozilla/Services.h"
23 #include <algorithm>
25 namespace mozilla {
27 #ifdef PR_LOGGING
28 PRLogModuleInfo* gMediaCacheLog;
29 #define CACHE_LOG(type, msg) PR_LOG(gMediaCacheLog, type, msg)
30 #else
31 #define CACHE_LOG(type, msg)
32 #endif
34 // Readahead blocks for non-seekable streams will be limited to this
35 // fraction of the cache space. We don't normally evict such blocks
36 // because replacing them requires a seek, but we need to make sure
37 // they don't monopolize the cache.
38 static const double NONSEEKABLE_READAHEAD_MAX = 0.5;
40 // Data N seconds before the current playback position is given the same priority
41 // as data REPLAY_PENALTY_FACTOR*N seconds ahead of the current playback
42 // position. REPLAY_PENALTY_FACTOR is greater than 1 to reflect that
43 // data in the past is less likely to be played again than data in the future.
44 // We want to give data just behind the current playback position reasonably
45 // high priority in case codecs need to retrieve that data (e.g. because
46 // tracks haven't been muxed well or are being decoded at uneven rates).
47 // 1/REPLAY_PENALTY_FACTOR as much data will be kept behind the
48 // current playback position as will be kept ahead of the current playback
49 // position.
50 static const uint32_t REPLAY_PENALTY_FACTOR = 3;
52 // When looking for a reusable block, scan forward this many blocks
53 // from the desired "best" block location to look for free blocks,
54 // before we resort to scanning the whole cache. The idea is to try to
55 // store runs of stream blocks close-to-consecutively in the cache if we
56 // can.
57 static const uint32_t FREE_BLOCK_SCAN_LIMIT = 16;
59 // Try to save power by not resuming paused reads if the stream won't need new
60 // data within this time interval in the future
61 static const uint32_t CACHE_POWERSAVE_WAKEUP_LOW_THRESHOLD_MS = 10000;
63 #ifdef DEBUG
64 // Turn this on to do very expensive cache state validation
65 // #define DEBUG_VERIFY_CACHE
66 #endif
68 // There is at most one media cache (although that could quite easily be
69 // relaxed if we wanted to manage multiple caches with independent
70 // size limits).
71 static MediaCache* gMediaCache;
73 class MediaCacheFlusher MOZ_FINAL : public nsIObserver,
74 public nsSupportsWeakReference {
75 MediaCacheFlusher() {}
76 ~MediaCacheFlusher();
77 public:
78 NS_DECL_ISUPPORTS
79 NS_DECL_NSIOBSERVER
81 static void Init();
82 };
84 static MediaCacheFlusher* gMediaCacheFlusher;
86 NS_IMPL_ISUPPORTS(MediaCacheFlusher, nsIObserver, nsISupportsWeakReference)
88 MediaCacheFlusher::~MediaCacheFlusher()
89 {
90 gMediaCacheFlusher = nullptr;
91 }
93 void MediaCacheFlusher::Init()
94 {
95 if (gMediaCacheFlusher) {
96 return;
97 }
99 gMediaCacheFlusher = new MediaCacheFlusher();
100 NS_ADDREF(gMediaCacheFlusher);
102 nsCOMPtr<nsIObserverService> observerService =
103 mozilla::services::GetObserverService();
104 if (observerService) {
105 observerService->AddObserver(gMediaCacheFlusher, "last-pb-context-exited", true);
106 observerService->AddObserver(gMediaCacheFlusher, "network-clear-cache-stored-anywhere", true);
107 }
108 }
110 class MediaCache {
111 public:
112 friend class MediaCacheStream::BlockList;
113 typedef MediaCacheStream::BlockList BlockList;
114 enum {
115 BLOCK_SIZE = MediaCacheStream::BLOCK_SIZE
116 };
118 MediaCache() : mNextResourceID(1),
119 mReentrantMonitor("MediaCache.mReentrantMonitor"),
120 mUpdateQueued(false)
121 #ifdef DEBUG
122 , mInUpdate(false)
123 #endif
124 {
125 MOZ_COUNT_CTOR(MediaCache);
126 }
127 ~MediaCache() {
128 NS_ASSERTION(mStreams.IsEmpty(), "Stream(s) still open!");
129 Truncate();
130 NS_ASSERTION(mIndex.Length() == 0, "Blocks leaked?");
131 if (mFileCache) {
132 mFileCache->Close();
133 mFileCache = nullptr;
134 }
135 MOZ_COUNT_DTOR(MediaCache);
136 }
138 // Main thread only. Creates the backing cache file. If this fails,
139 // then the cache is still in a semi-valid state; mFileCache will be null,
140 // so all I/O on the cache file will fail.
141 nsresult Init();
142 // Shut down the global cache if it's no longer needed. We shut down
143 // the cache as soon as there are no streams. This means that during
144 // normal operation we are likely to start up the cache and shut it down
145 // many times, but that's OK since starting it up is cheap and
146 // shutting it down cleans things up and releases disk space.
147 static void MaybeShutdown();
149 // Brutally flush the cache contents. Main thread only.
150 static void Flush();
151 void FlushInternal();
153 // Cache-file access methods. These are the lowest-level cache methods.
154 // mReentrantMonitor must be held; these can be called on any thread.
155 // This can return partial reads.
156 nsresult ReadCacheFile(int64_t aOffset, void* aData, int32_t aLength,
157 int32_t* aBytes);
158 // This will fail if fewer than aLength bytes can be read
159 nsresult ReadCacheFileAllBytes(int64_t aOffset, void* aData, int32_t aLength);
161 int64_t AllocateResourceID()
162 {
163 mReentrantMonitor.AssertCurrentThreadIn();
164 return mNextResourceID++;
165 }
167 // mReentrantMonitor must be held, called on main thread.
168 // These methods are used by the stream to set up and tear down streams,
169 // and to handle reads and writes.
170 // Add aStream to the list of streams.
171 void OpenStream(MediaCacheStream* aStream);
172 // Remove aStream from the list of streams.
173 void ReleaseStream(MediaCacheStream* aStream);
174 // Free all blocks belonging to aStream.
175 void ReleaseStreamBlocks(MediaCacheStream* aStream);
176 // Find a cache entry for this data, and write the data into it
177 void AllocateAndWriteBlock(MediaCacheStream* aStream, const void* aData,
178 MediaCacheStream::ReadMode aMode);
180 // mReentrantMonitor must be held; can be called on any thread
181 // Notify the cache that a seek has been requested. Some blocks may
182 // need to change their class between PLAYED_BLOCK and READAHEAD_BLOCK.
183 // This does not trigger channel seeks directly; the next Update()
184 // will do that if necessary. The caller will call QueueUpdate().
185 void NoteSeek(MediaCacheStream* aStream, int64_t aOldOffset);
186 // Notify the cache that a block has been read from. This is used
187 // to update last-use times. The block may not actually have a
188 // cache entry yet since Read can read data from a stream's
189 // in-memory mPartialBlockBuffer while the block is only partly full,
190 // and thus hasn't yet been committed to the cache. The caller will
191 // call QueueUpdate().
192 void NoteBlockUsage(MediaCacheStream* aStream, int32_t aBlockIndex,
193 MediaCacheStream::ReadMode aMode, TimeStamp aNow);
194 // Mark aStream as having the block, adding it as an owner.
195 void AddBlockOwnerAsReadahead(int32_t aBlockIndex, MediaCacheStream* aStream,
196 int32_t aStreamBlockIndex);
198 // This queues a call to Update() on the main thread.
199 void QueueUpdate();
201 // Updates the cache state asynchronously on the main thread:
202 // -- try to trim the cache back to its desired size, if necessary
203 // -- suspend channels that are going to read data that's lower priority
204 // than anything currently cached
205 // -- resume channels that are going to read data that's higher priority
206 // than something currently cached
207 // -- seek channels that need to seek to a new location
208 void Update();
210 #ifdef DEBUG_VERIFY_CACHE
211 // Verify invariants, especially block list invariants
212 void Verify();
213 #else
214 void Verify() {}
215 #endif
217 ReentrantMonitor& GetReentrantMonitor() { return mReentrantMonitor; }
219 /**
220 * An iterator that makes it easy to iterate through all streams that
221 * have a given resource ID and are not closed.
222 * Can be used on the main thread or while holding the media cache lock.
223 */
224 class ResourceStreamIterator {
225 public:
226 ResourceStreamIterator(int64_t aResourceID) :
227 mResourceID(aResourceID), mNext(0) {}
228 MediaCacheStream* Next()
229 {
230 while (mNext < gMediaCache->mStreams.Length()) {
231 MediaCacheStream* stream = gMediaCache->mStreams[mNext];
232 ++mNext;
233 if (stream->GetResourceID() == mResourceID && !stream->IsClosed())
234 return stream;
235 }
236 return nullptr;
237 }
238 private:
239 int64_t mResourceID;
240 uint32_t mNext;
241 };
243 protected:
244 // Find a free or reusable block and return its index. If there are no
245 // free blocks and no reusable blocks, add a new block to the cache
246 // and return it. Can return -1 on OOM.
247 int32_t FindBlockForIncomingData(TimeStamp aNow, MediaCacheStream* aStream);
248 // Find a reusable block --- a free block, if there is one, otherwise
249 // the reusable block with the latest predicted-next-use, or -1 if
250 // there aren't any freeable blocks. Only block indices less than
251 // aMaxSearchBlockIndex are considered. If aForStream is non-null,
252 // then aForStream and aForStreamBlock indicate what media data will
253 // be placed; FindReusableBlock will favour returning free blocks
254 // near other blocks for that point in the stream.
255 int32_t FindReusableBlock(TimeStamp aNow,
256 MediaCacheStream* aForStream,
257 int32_t aForStreamBlock,
258 int32_t aMaxSearchBlockIndex);
259 bool BlockIsReusable(int32_t aBlockIndex);
260 // Given a list of blocks sorted with the most reusable blocks at the
261 // end, find the last block whose stream is not pinned (if any)
262 // and whose cache entry index is less than aBlockIndexLimit
263 // and append it to aResult.
264 void AppendMostReusableBlock(BlockList* aBlockList,
265 nsTArray<uint32_t>* aResult,
266 int32_t aBlockIndexLimit);
268 enum BlockClass {
269 // block belongs to mMetadataBlockList because data has been consumed
270 // from it in "metadata mode" --- in particular blocks read during
271 // Ogg seeks go into this class. These blocks may have played data
272 // in them too.
273 METADATA_BLOCK,
274 // block belongs to mPlayedBlockList because its offset is
275 // less than the stream's current reader position
276 PLAYED_BLOCK,
277 // block belongs to the stream's mReadaheadBlockList because its
278 // offset is greater than or equal to the stream's current
279 // reader position
280 READAHEAD_BLOCK
281 };
283 struct BlockOwner {
284 BlockOwner() : mStream(nullptr), mClass(READAHEAD_BLOCK) {}
286 // The stream that owns this block, or null if the block is free.
287 MediaCacheStream* mStream;
288 // The block index in the stream. Valid only if mStream is non-null.
289 uint32_t mStreamBlock;
290 // Time at which this block was last used. Valid only if
291 // mClass is METADATA_BLOCK or PLAYED_BLOCK.
292 TimeStamp mLastUseTime;
293 BlockClass mClass;
294 };
296 struct Block {
297 // Free blocks have an empty mOwners array
298 nsTArray<BlockOwner> mOwners;
299 };
301 // Get the BlockList that the block should belong to given its
302 // current owner
303 BlockList* GetListForBlock(BlockOwner* aBlock);
304 // Get the BlockOwner for the given block index and owning stream
305 // (returns null if the stream does not own the block)
306 BlockOwner* GetBlockOwner(int32_t aBlockIndex, MediaCacheStream* aStream);
307 // Returns true iff the block is free
308 bool IsBlockFree(int32_t aBlockIndex)
309 { return mIndex[aBlockIndex].mOwners.IsEmpty(); }
310 // Add the block to the free list and mark its streams as not having
311 // the block in cache
312 void FreeBlock(int32_t aBlock);
313 // Mark aStream as not having the block, removing it as an owner. If
314 // the block has no more owners it's added to the free list.
315 void RemoveBlockOwner(int32_t aBlockIndex, MediaCacheStream* aStream);
316 // Swap all metadata associated with the two blocks. The caller
317 // is responsible for swapping any associated cache file state.
318 void SwapBlocks(int32_t aBlockIndex1, int32_t aBlockIndex2);
319 // Insert the block into the readahead block list for the stream
320 // at the right point in the list.
321 void InsertReadaheadBlock(BlockOwner* aBlockOwner, int32_t aBlockIndex);
323 // Guess the duration until block aBlock will be next used
324 TimeDuration PredictNextUse(TimeStamp aNow, int32_t aBlock);
325 // Guess the duration until the next incoming data on aStream will be used
326 TimeDuration PredictNextUseForIncomingData(MediaCacheStream* aStream);
328 // Truncate the file and index array if there are free blocks at the
329 // end
330 void Truncate();
332 // This member is main-thread only. It's used to allocate unique
333 // resource IDs to streams.
334 int64_t mNextResourceID;
336 // The monitor protects all the data members here. Also, off-main-thread
337 // readers that need to block will Wait() on this monitor. When new
338 // data becomes available in the cache, we NotifyAll() on this monitor.
339 ReentrantMonitor mReentrantMonitor;
340 // This is only written while on the main thread and the monitor is held.
341 // Thus, it can be safely read from the main thread or while holding the monitor.
342 nsTArray<MediaCacheStream*> mStreams;
343 // The Blocks describing the cache entries.
344 nsTArray<Block> mIndex;
345 // Writer which performs IO, asynchronously writing cache blocks.
346 nsRefPtr<FileBlockCache> mFileCache;
347 // The list of free blocks; they are not ordered.
348 BlockList mFreeBlocks;
349 // True if an event to run Update() has been queued but not processed
350 bool mUpdateQueued;
351 #ifdef DEBUG
352 bool mInUpdate;
353 #endif
354 };
356 NS_IMETHODIMP
357 MediaCacheFlusher::Observe(nsISupports *aSubject, char const *aTopic, char16_t const *aData)
358 {
359 if (strcmp(aTopic, "last-pb-context-exited") == 0) {
360 MediaCache::Flush();
361 }
362 if (strcmp(aTopic, "network-clear-cache-stored-anywhere") == 0) {
363 MediaCache::Flush();
364 }
365 return NS_OK;
366 }
368 MediaCacheStream::MediaCacheStream(ChannelMediaResource* aClient)
369 : mClient(aClient),
370 mInitialized(false),
371 mHasHadUpdate(false),
372 mClosed(false),
373 mDidNotifyDataEnded(false),
374 mResourceID(0),
375 mIsTransportSeekable(false),
376 mCacheSuspended(false),
377 mChannelEnded(false),
378 mChannelOffset(0),
379 mStreamLength(-1),
380 mStreamOffset(0),
381 mPlaybackBytesPerSecond(10000),
382 mPinCount(0),
383 mCurrentMode(MODE_PLAYBACK),
384 mMetadataInPartialBlockBuffer(false),
385 mPartialBlockBuffer(new int64_t[BLOCK_SIZE/sizeof(int64_t)])
386 {
387 }
389 size_t MediaCacheStream::SizeOfExcludingThis(
390 MallocSizeOf aMallocSizeOf) const
391 {
392 // Looks like these are not owned:
393 // - mClient
394 // - mPrincipal
395 size_t size = mBlocks.SizeOfExcludingThis(aMallocSizeOf);
396 size += mReadaheadBlocks.SizeOfExcludingThis(aMallocSizeOf);
397 size += mMetadataBlocks.SizeOfExcludingThis(aMallocSizeOf);
398 size += mPlayedBlocks.SizeOfExcludingThis(aMallocSizeOf);
399 size += mPartialBlockBuffer.SizeOfExcludingThis(aMallocSizeOf);
401 return size;
402 }
404 size_t MediaCacheStream::BlockList::SizeOfExcludingThis(
405 MallocSizeOf aMallocSizeOf) const
406 {
407 return mEntries.SizeOfExcludingThis(/* sizeOfEntryExcludingThis = */ nullptr,
408 aMallocSizeOf);
409 }
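// BlockList stores its entries in a hashtable keyed by cache block index; the
// entries' mPrevBlock/mNextBlock fields form a circular doubly-linked list
// whose head is mFirstBlock.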
411 void MediaCacheStream::BlockList::AddFirstBlock(int32_t aBlock)
412 {
413 NS_ASSERTION(!mEntries.GetEntry(aBlock), "Block already in list");
414 Entry* entry = mEntries.PutEntry(aBlock);
416 if (mFirstBlock < 0) {
417 entry->mNextBlock = entry->mPrevBlock = aBlock;
418 } else {
419 entry->mNextBlock = mFirstBlock;
420 entry->mPrevBlock = mEntries.GetEntry(mFirstBlock)->mPrevBlock;
421 mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = aBlock;
422 mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = aBlock;
423 }
424 mFirstBlock = aBlock;
425 ++mCount;
426 }
428 void MediaCacheStream::BlockList::AddAfter(int32_t aBlock, int32_t aBefore)
429 {
430 NS_ASSERTION(!mEntries.GetEntry(aBlock), "Block already in list");
431 Entry* entry = mEntries.PutEntry(aBlock);
433 Entry* addAfter = mEntries.GetEntry(aBefore);
434 NS_ASSERTION(addAfter, "aBefore not in list");
436 entry->mNextBlock = addAfter->mNextBlock;
437 entry->mPrevBlock = aBefore;
438 mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = aBlock;
439 mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = aBlock;
440 ++mCount;
441 }
443 void MediaCacheStream::BlockList::RemoveBlock(int32_t aBlock)
444 {
445 Entry* entry = mEntries.GetEntry(aBlock);
446 NS_ASSERTION(entry, "Block not in list");
448 if (entry->mNextBlock == aBlock) {
449 NS_ASSERTION(entry->mPrevBlock == aBlock, "Linked list inconsistency");
450 NS_ASSERTION(mFirstBlock == aBlock, "Linked list inconsistency");
451 mFirstBlock = -1;
452 } else {
453 if (mFirstBlock == aBlock) {
454 mFirstBlock = entry->mNextBlock;
455 }
456 mEntries.GetEntry(entry->mNextBlock)->mPrevBlock = entry->mPrevBlock;
457 mEntries.GetEntry(entry->mPrevBlock)->mNextBlock = entry->mNextBlock;
458 }
459 mEntries.RemoveEntry(aBlock);
460 --mCount;
461 }
463 int32_t MediaCacheStream::BlockList::GetLastBlock() const
464 {
465 if (mFirstBlock < 0)
466 return -1;
467 return mEntries.GetEntry(mFirstBlock)->mPrevBlock;
468 }
470 int32_t MediaCacheStream::BlockList::GetNextBlock(int32_t aBlock) const
471 {
472 int32_t block = mEntries.GetEntry(aBlock)->mNextBlock;
473 if (block == mFirstBlock)
474 return -1;
475 return block;
476 }
478 int32_t MediaCacheStream::BlockList::GetPrevBlock(int32_t aBlock) const
479 {
480 if (aBlock == mFirstBlock)
481 return -1;
482 return mEntries.GetEntry(aBlock)->mPrevBlock;
483 }
485 #ifdef DEBUG
486 void MediaCacheStream::BlockList::Verify()
487 {
488 int32_t count = 0;
489 if (mFirstBlock >= 0) {
490 int32_t block = mFirstBlock;
491 do {
492 Entry* entry = mEntries.GetEntry(block);
493 NS_ASSERTION(mEntries.GetEntry(entry->mNextBlock)->mPrevBlock == block,
494 "Bad prev link");
495 NS_ASSERTION(mEntries.GetEntry(entry->mPrevBlock)->mNextBlock == block,
496 "Bad next link");
497 block = entry->mNextBlock;
498 ++count;
499 } while (block != mFirstBlock);
500 }
501 NS_ASSERTION(count == mCount, "Bad count");
502 }
503 #endif
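// If *aBlockIndex refers to either of the two swapped cache block indices,
// repoint it at the other one.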
505 static void UpdateSwappedBlockIndex(int32_t* aBlockIndex,
506 int32_t aBlock1Index, int32_t aBlock2Index)
507 {
508 int32_t index = *aBlockIndex;
509 if (index == aBlock1Index) {
510 *aBlockIndex = aBlock2Index;
511 } else if (index == aBlock2Index) {
512 *aBlockIndex = aBlock1Index;
513 }
514 }
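// Called for each BlockList when two cache blocks trade indices: fixes up
// mFirstBlock, the prev/next links, and the hashtable keys so the list still
// describes the same logical ordering.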
516 void
517 MediaCacheStream::BlockList::NotifyBlockSwapped(int32_t aBlockIndex1,
518 int32_t aBlockIndex2)
519 {
520 Entry* e1 = mEntries.GetEntry(aBlockIndex1);
521 Entry* e2 = mEntries.GetEntry(aBlockIndex2);
522 int32_t e1Prev = -1, e1Next = -1, e2Prev = -1, e2Next = -1;
524 // Fix mFirstBlock
525 UpdateSwappedBlockIndex(&mFirstBlock, aBlockIndex1, aBlockIndex2);
527 // Fix mNextBlock/mPrevBlock links. First capture previous/next links
528 // so we don't get confused due to aliasing.
529 if (e1) {
530 e1Prev = e1->mPrevBlock;
531 e1Next = e1->mNextBlock;
532 }
533 if (e2) {
534 e2Prev = e2->mPrevBlock;
535 e2Next = e2->mNextBlock;
536 }
537 // Update the entries.
538 if (e1) {
539 mEntries.GetEntry(e1Prev)->mNextBlock = aBlockIndex2;
540 mEntries.GetEntry(e1Next)->mPrevBlock = aBlockIndex2;
541 }
542 if (e2) {
543 mEntries.GetEntry(e2Prev)->mNextBlock = aBlockIndex1;
544 mEntries.GetEntry(e2Next)->mPrevBlock = aBlockIndex1;
545 }
547 // Fix hashtable keys. First remove stale entries.
548 if (e1) {
549 e1Prev = e1->mPrevBlock;
550 e1Next = e1->mNextBlock;
551 mEntries.RemoveEntry(aBlockIndex1);
552 // Refresh pointer after hashtable mutation.
553 e2 = mEntries.GetEntry(aBlockIndex2);
554 }
555 if (e2) {
556 e2Prev = e2->mPrevBlock;
557 e2Next = e2->mNextBlock;
558 mEntries.RemoveEntry(aBlockIndex2);
559 }
560 // Put new entries back.
561 if (e1) {
562 e1 = mEntries.PutEntry(aBlockIndex2);
563 e1->mNextBlock = e1Next;
564 e1->mPrevBlock = e1Prev;
565 }
566 if (e2) {
567 e2 = mEntries.PutEntry(aBlockIndex1);
568 e2->mNextBlock = e2Next;
569 e2->mPrevBlock = e2Prev;
570 }
571 }
573 nsresult
574 MediaCache::Init()
575 {
576 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
577 NS_ASSERTION(!mFileCache, "Cache file already open?");
579 PRFileDesc* fileDesc = nullptr;
580 nsresult rv = NS_OpenAnonymousTemporaryFile(&fileDesc);
581 NS_ENSURE_SUCCESS(rv,rv);
583 mFileCache = new FileBlockCache();
584 rv = mFileCache->Open(fileDesc);
585 NS_ENSURE_SUCCESS(rv,rv);
587 #ifdef PR_LOGGING
588 if (!gMediaCacheLog) {
589 gMediaCacheLog = PR_NewLogModule("MediaCache");
590 }
591 #endif
593 MediaCacheFlusher::Init();
595 return NS_OK;
596 }
598 void
599 MediaCache::Flush()
600 {
601 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
603 if (!gMediaCache)
604 return;
606 gMediaCache->FlushInternal();
607 }
609 void
610 MediaCache::FlushInternal()
611 {
612 ReentrantMonitorAutoEnter mon(mReentrantMonitor);
614 for (uint32_t blockIndex = 0; blockIndex < mIndex.Length(); ++blockIndex) {
615 FreeBlock(blockIndex);
616 }
618 // Truncate file, close it, and reopen
619 Truncate();
620 NS_ASSERTION(mIndex.Length() == 0, "Blocks leaked?");
621 if (mFileCache) {
622 mFileCache->Close();
623 mFileCache = nullptr;
624 }
625 Init();
626 }
628 void
629 MediaCache::MaybeShutdown()
630 {
631 NS_ASSERTION(NS_IsMainThread(),
632 "MediaCache::MaybeShutdown called on non-main thread");
633 if (!gMediaCache->mStreams.IsEmpty()) {
634 // Don't shut down yet, streams are still alive
635 return;
636 }
638 // Since we're on the main thread, no-one is going to add a new stream
639 // while we shut down.
640 // This function is static so we don't have to delete 'this'.
641 delete gMediaCache;
642 gMediaCache = nullptr;
643 NS_IF_RELEASE(gMediaCacheFlusher);
644 }
646 static void
647 InitMediaCache()
648 {
649 if (gMediaCache)
650 return;
652 gMediaCache = new MediaCache();
653 if (!gMediaCache)
654 return;
656 nsresult rv = gMediaCache->Init();
657 if (NS_FAILED(rv)) {
658 delete gMediaCache;
659 gMediaCache = nullptr;
660 }
661 }
663 nsresult
664 MediaCache::ReadCacheFile(int64_t aOffset, void* aData, int32_t aLength,
665 int32_t* aBytes)
666 {
667 mReentrantMonitor.AssertCurrentThreadIn();
669 if (!mFileCache)
670 return NS_ERROR_FAILURE;
672 return mFileCache->Read(aOffset, reinterpret_cast<uint8_t*>(aData), aLength, aBytes);
673 }
675 nsresult
676 MediaCache::ReadCacheFileAllBytes(int64_t aOffset, void* aData, int32_t aLength)
677 {
678 mReentrantMonitor.AssertCurrentThreadIn();
680 int64_t offset = aOffset;
681 int32_t count = aLength;
682 // Cast to char* so we can do byte-wise pointer arithmetic
683 char* data = static_cast<char*>(aData);
684 while (count > 0) {
685 int32_t bytes;
686 nsresult rv = ReadCacheFile(offset, data, count, &bytes);
687 if (NS_FAILED(rv))
688 return rv;
689 if (bytes == 0)
690 return NS_ERROR_FAILURE;
691 count -= bytes;
692 data += bytes;
693 offset += bytes;
694 }
695 return NS_OK;
696 }
698 static int32_t GetMaxBlocks()
699 {
700 // We look up the cache size every time. This means dynamic changes
701 // to the pref are applied.
702 // Cache size is in KB
703 int32_t cacheSize = Preferences::GetInt("media.cache_size", 500*1024);
704 int64_t maxBlocks = static_cast<int64_t>(cacheSize)*1024/MediaCache::BLOCK_SIZE;
705 maxBlocks = std::max<int64_t>(maxBlocks, 1);
706 return int32_t(std::min<int64_t>(maxBlocks, INT32_MAX));
707 }
709 int32_t
710 MediaCache::FindBlockForIncomingData(TimeStamp aNow,
711 MediaCacheStream* aStream)
712 {
713 mReentrantMonitor.AssertCurrentThreadIn();
715 int32_t blockIndex = FindReusableBlock(aNow, aStream,
716 aStream->mChannelOffset/BLOCK_SIZE, INT32_MAX);
718 if (blockIndex < 0 || !IsBlockFree(blockIndex)) {
719 // The block returned is already allocated.
720 // Don't reuse it if a) there's room to expand the cache or
721 // b) the data we're going to store in the free block is not higher
722 // priority than the data already stored in the free block.
723 // The latter can lead us to go over the cache limit a bit.
724 if ((mIndex.Length() < uint32_t(GetMaxBlocks()) || blockIndex < 0 ||
725 PredictNextUseForIncomingData(aStream) >= PredictNextUse(aNow, blockIndex))) {
726 blockIndex = mIndex.Length();
727 if (!mIndex.AppendElement())
728 return -1;
729 mFreeBlocks.AddFirstBlock(blockIndex);
730 return blockIndex;
731 }
732 }
734 return blockIndex;
735 }
737 bool
738 MediaCache::BlockIsReusable(int32_t aBlockIndex)
739 {
740 Block* block = &mIndex[aBlockIndex];
741 for (uint32_t i = 0; i < block->mOwners.Length(); ++i) {
742 MediaCacheStream* stream = block->mOwners[i].mStream;
743 if (stream->mPinCount > 0 ||
744 stream->mStreamOffset/BLOCK_SIZE == block->mOwners[i].mStreamBlock) {
745 return false;
746 }
747 }
748 return true;
749 }
751 void
752 MediaCache::AppendMostReusableBlock(BlockList* aBlockList,
753 nsTArray<uint32_t>* aResult,
754 int32_t aBlockIndexLimit)
755 {
756 mReentrantMonitor.AssertCurrentThreadIn();
758 int32_t blockIndex = aBlockList->GetLastBlock();
759 if (blockIndex < 0)
760 return;
761 do {
762 // Don't consider blocks for pinned streams, or blocks that are
763 // beyond the specified limit, or a block that contains a stream's
764 // current read position (such a block contains both played data
765 // and readahead data)
766 if (blockIndex < aBlockIndexLimit && BlockIsReusable(blockIndex)) {
767 aResult->AppendElement(blockIndex);
768 return;
769 }
770 blockIndex = aBlockList->GetPrevBlock(blockIndex);
771 } while (blockIndex >= 0);
772 }
774 int32_t
775 MediaCache::FindReusableBlock(TimeStamp aNow,
776 MediaCacheStream* aForStream,
777 int32_t aForStreamBlock,
778 int32_t aMaxSearchBlockIndex)
779 {
780 mReentrantMonitor.AssertCurrentThreadIn();
782 uint32_t length = std::min(uint32_t(aMaxSearchBlockIndex), mIndex.Length());
784 if (aForStream && aForStreamBlock > 0 &&
785 uint32_t(aForStreamBlock) <= aForStream->mBlocks.Length()) {
786 int32_t prevCacheBlock = aForStream->mBlocks[aForStreamBlock - 1];
787 if (prevCacheBlock >= 0) {
788 uint32_t freeBlockScanEnd =
789 std::min(length, prevCacheBlock + FREE_BLOCK_SCAN_LIMIT);
790 for (uint32_t i = prevCacheBlock; i < freeBlockScanEnd; ++i) {
791 if (IsBlockFree(i))
792 return i;
793 }
794 }
795 }
797 if (!mFreeBlocks.IsEmpty()) {
798 int32_t blockIndex = mFreeBlocks.GetFirstBlock();
799 do {
800 if (blockIndex < aMaxSearchBlockIndex)
801 return blockIndex;
802 blockIndex = mFreeBlocks.GetNextBlock(blockIndex);
803 } while (blockIndex >= 0);
804 }
806 // Build a list of the blocks we should consider for the "latest
807 // predicted time of next use". We can exploit the fact that the block
808 // linked lists are ordered by increasing time of next use. This is
809 // actually the whole point of having the linked lists.
810 nsAutoTArray<uint32_t,8> candidates;
811 for (uint32_t i = 0; i < mStreams.Length(); ++i) {
812 MediaCacheStream* stream = mStreams[i];
813 if (stream->mPinCount > 0) {
814 // No point in even looking at this stream's blocks
815 continue;
816 }
818 AppendMostReusableBlock(&stream->mMetadataBlocks, &candidates, length);
819 AppendMostReusableBlock(&stream->mPlayedBlocks, &candidates, length);
821 // Don't consider readahead blocks in non-seekable streams. If we
822 // remove the block we won't be able to seek back to read it later.
823 if (stream->mIsTransportSeekable) {
824 AppendMostReusableBlock(&stream->mReadaheadBlocks, &candidates, length);
825 }
826 }
828 TimeDuration latestUse;
829 int32_t latestUseBlock = -1;
830 for (uint32_t i = 0; i < candidates.Length(); ++i) {
831 TimeDuration nextUse = PredictNextUse(aNow, candidates[i]);
832 if (nextUse > latestUse) {
833 latestUse = nextUse;
834 latestUseBlock = candidates[i];
835 }
836 }
838 return latestUseBlock;
839 }
841 MediaCache::BlockList*
842 MediaCache::GetListForBlock(BlockOwner* aBlock)
843 {
844 switch (aBlock->mClass) {
845 case METADATA_BLOCK:
846 NS_ASSERTION(aBlock->mStream, "Metadata block has no stream?");
847 return &aBlock->mStream->mMetadataBlocks;
848 case PLAYED_BLOCK:
849 NS_ASSERTION(aBlock->mStream, "Played block has no stream?");
850 return &aBlock->mStream->mPlayedBlocks;
851 case READAHEAD_BLOCK:
852 NS_ASSERTION(aBlock->mStream, "Readahead block has no stream?");
853 return &aBlock->mStream->mReadaheadBlocks;
854 default:
855 NS_ERROR("Invalid block class");
856 return nullptr;
857 }
858 }
860 MediaCache::BlockOwner*
861 MediaCache::GetBlockOwner(int32_t aBlockIndex, MediaCacheStream* aStream)
862 {
863 Block* block = &mIndex[aBlockIndex];
864 for (uint32_t i = 0; i < block->mOwners.Length(); ++i) {
865 if (block->mOwners[i].mStream == aStream)
866 return &block->mOwners[i];
867 }
868 return nullptr;
869 }
871 void
872 MediaCache::SwapBlocks(int32_t aBlockIndex1, int32_t aBlockIndex2)
873 {
874 mReentrantMonitor.AssertCurrentThreadIn();
876 Block* block1 = &mIndex[aBlockIndex1];
877 Block* block2 = &mIndex[aBlockIndex2];
879 block1->mOwners.SwapElements(block2->mOwners);
881 // Now all references to block1 have to be replaced with block2 and
882 // vice versa.
883 // First update stream references to blocks via mBlocks.
884 const Block* blocks[] = { block1, block2 };
885 int32_t blockIndices[] = { aBlockIndex1, aBlockIndex2 };
886 for (int32_t i = 0; i < 2; ++i) {
887 for (uint32_t j = 0; j < blocks[i]->mOwners.Length(); ++j) {
888 const BlockOwner* b = &blocks[i]->mOwners[j];
889 b->mStream->mBlocks[b->mStreamBlock] = blockIndices[i];
890 }
891 }
893 // Now update references to blocks in block lists.
894 mFreeBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2);
896 nsTHashtable<nsPtrHashKey<MediaCacheStream> > visitedStreams;
898 for (int32_t i = 0; i < 2; ++i) {
899 for (uint32_t j = 0; j < blocks[i]->mOwners.Length(); ++j) {
900 MediaCacheStream* stream = blocks[i]->mOwners[j].mStream;
901 // Make sure that we don't update the same stream twice --- that
902 // would result in swapping the block references back again!
903 if (visitedStreams.GetEntry(stream))
904 continue;
905 visitedStreams.PutEntry(stream);
906 stream->mReadaheadBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2);
907 stream->mPlayedBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2);
908 stream->mMetadataBlocks.NotifyBlockSwapped(aBlockIndex1, aBlockIndex2);
909 }
910 }
912 Verify();
913 }
915 void
916 MediaCache::RemoveBlockOwner(int32_t aBlockIndex, MediaCacheStream* aStream)
917 {
918 Block* block = &mIndex[aBlockIndex];
919 for (uint32_t i = 0; i < block->mOwners.Length(); ++i) {
920 BlockOwner* bo = &block->mOwners[i];
921 if (bo->mStream == aStream) {
922 GetListForBlock(bo)->RemoveBlock(aBlockIndex);
923 bo->mStream->mBlocks[bo->mStreamBlock] = -1;
924 block->mOwners.RemoveElementAt(i);
925 if (block->mOwners.IsEmpty()) {
926 mFreeBlocks.AddFirstBlock(aBlockIndex);
927 }
928 return;
929 }
930 }
931 }
933 void
934 MediaCache::AddBlockOwnerAsReadahead(int32_t aBlockIndex,
935 MediaCacheStream* aStream,
936 int32_t aStreamBlockIndex)
937 {
938 Block* block = &mIndex[aBlockIndex];
939 if (block->mOwners.IsEmpty()) {
940 mFreeBlocks.RemoveBlock(aBlockIndex);
941 }
942 BlockOwner* bo = block->mOwners.AppendElement();
943 bo->mStream = aStream;
944 bo->mStreamBlock = aStreamBlockIndex;
945 aStream->mBlocks[aStreamBlockIndex] = aBlockIndex;
946 bo->mClass = READAHEAD_BLOCK;
947 InsertReadaheadBlock(bo, aBlockIndex);
948 }
950 void
951 MediaCache::FreeBlock(int32_t aBlock)
952 {
953 mReentrantMonitor.AssertCurrentThreadIn();
955 Block* block = &mIndex[aBlock];
956 if (block->mOwners.IsEmpty()) {
957 // already free
958 return;
959 }
961 CACHE_LOG(PR_LOG_DEBUG, ("Released block %d", aBlock));
963 for (uint32_t i = 0; i < block->mOwners.Length(); ++i) {
964 BlockOwner* bo = &block->mOwners[i];
965 GetListForBlock(bo)->RemoveBlock(aBlock);
966 bo->mStream->mBlocks[bo->mStreamBlock] = -1;
967 }
968 block->mOwners.Clear();
969 mFreeBlocks.AddFirstBlock(aBlock);
970 Verify();
971 }
973 TimeDuration
974 MediaCache::PredictNextUse(TimeStamp aNow, int32_t aBlock)
975 {
976 mReentrantMonitor.AssertCurrentThreadIn();
977 NS_ASSERTION(!IsBlockFree(aBlock), "aBlock is free");
979 Block* block = &mIndex[aBlock];
980 // Blocks can belong to multiple streams. The predicted next use
981 // time is the earliest time predicted by any of the streams.
982 TimeDuration result;
983 for (uint32_t i = 0; i < block->mOwners.Length(); ++i) {
984 BlockOwner* bo = &block->mOwners[i];
985 TimeDuration prediction;
986 switch (bo->mClass) {
987 case METADATA_BLOCK:
988 // This block should be managed in LRU mode. For metadata we predict
989 // that the time until the next use is the time since the last use.
990 prediction = aNow - bo->mLastUseTime;
991 break;
992 case PLAYED_BLOCK: {
993 // This block should be managed in LRU mode, and we should impose
994 // a "replay delay" to reflect the likelihood of replay happening
995 NS_ASSERTION(static_cast<int64_t>(bo->mStreamBlock)*BLOCK_SIZE <
996 bo->mStream->mStreamOffset,
997 "Played block after the current stream position?");
998 int64_t bytesBehind =
999 bo->mStream->mStreamOffset - static_cast<int64_t>(bo->mStreamBlock)*BLOCK_SIZE;
1000 int64_t millisecondsBehind =
1001 bytesBehind*1000/bo->mStream->mPlaybackBytesPerSecond;
1002 prediction = TimeDuration::FromMilliseconds(
1003 std::min<int64_t>(millisecondsBehind*REPLAY_PENALTY_FACTOR, INT32_MAX));
1004 break;
1005 }
1006 case READAHEAD_BLOCK: {
1007 int64_t bytesAhead =
1008 static_cast<int64_t>(bo->mStreamBlock)*BLOCK_SIZE - bo->mStream->mStreamOffset;
1009 NS_ASSERTION(bytesAhead >= 0,
1010 "Readahead block before the current stream position?");
1011 int64_t millisecondsAhead =
1012 bytesAhead*1000/bo->mStream->mPlaybackBytesPerSecond;
1013 prediction = TimeDuration::FromMilliseconds(
1014 std::min<int64_t>(millisecondsAhead, INT32_MAX));
1015 break;
1016 }
1017 default:
1018 NS_ERROR("Invalid class for predicting next use");
1019 return TimeDuration(0);
1020 }
1021 if (i == 0 || prediction < result) {
1022 result = prediction;
1023 }
1024 }
1025 return result;
1026 }
1028 TimeDuration
1029 MediaCache::PredictNextUseForIncomingData(MediaCacheStream* aStream)
1030 {
1031 mReentrantMonitor.AssertCurrentThreadIn();
1033 int64_t bytesAhead = aStream->mChannelOffset - aStream->mStreamOffset;
1034 if (bytesAhead <= -BLOCK_SIZE) {
1035 // Hmm, no idea when data behind us will be used. Guess 24 hours.
1036 return TimeDuration::FromSeconds(24*60*60);
1037 }
1038 if (bytesAhead <= 0)
1039 return TimeDuration(0);
1040 int64_t millisecondsAhead = bytesAhead*1000/aStream->mPlaybackBytesPerSecond;
1041 return TimeDuration::FromMilliseconds(
1042 std::min<int64_t>(millisecondsAhead, INT32_MAX));
1043 }
1045 enum StreamAction { NONE, SEEK, SEEK_AND_RESUME, RESUME, SUSPEND };
1047 void
1048 MediaCache::Update()
1049 {
1050 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1052 // The action to use for each stream. We store these so we can make
1053 // decisions while holding the cache lock but implement those decisions
1054 // without holding the cache lock, since we need to call out to
1055 // stream, decoder and element code.
1056 nsAutoTArray<StreamAction,10> actions;
1058 {
1059 ReentrantMonitorAutoEnter mon(mReentrantMonitor);
1060 mUpdateQueued = false;
1061 #ifdef DEBUG
1062 mInUpdate = true;
1063 #endif
1065 int32_t maxBlocks = GetMaxBlocks();
1066 TimeStamp now = TimeStamp::Now();
1068 int32_t freeBlockCount = mFreeBlocks.GetCount();
1069 TimeDuration latestPredictedUseForOverflow = 0;
1070 if (mIndex.Length() > uint32_t(maxBlocks)) {
1071 // Try to trim back the cache to its desired maximum size. The cache may
1072 // have overflowed simply due to data being received when we have
1073 // no blocks in the main part of the cache that are free or lower
1074 // priority than the new data. The cache can also be overflowing because
1075 // the media.cache_size preference was reduced.
1076 // First, figure out what the least valuable block in the cache overflow
1077 // is. We don't want to replace any blocks in the main part of the
1078 // cache whose expected time of next use is earlier or equal to that.
1079 // If we allow that, we can effectively end up discarding overflowing
1080 // blocks (by moving an overflowing block to the main part of the cache,
1081 // and then overwriting it with another overflowing block), and we try
1082 // to avoid that since it requires HTTP seeks.
1083 // We also use this loop to eliminate overflowing blocks from
1084 // freeBlockCount.
1085 for (int32_t blockIndex = mIndex.Length() - 1; blockIndex >= maxBlocks;
1086 --blockIndex) {
1087 if (IsBlockFree(blockIndex)) {
1088 // Don't count overflowing free blocks in our free block count
1089 --freeBlockCount;
1090 continue;
1091 }
1092 TimeDuration predictedUse = PredictNextUse(now, blockIndex);
1093 latestPredictedUseForOverflow = std::max(latestPredictedUseForOverflow, predictedUse);
1094 }
1095 } else {
1096 freeBlockCount += maxBlocks - mIndex.Length();
1097 }
1099 // Now try to move overflowing blocks to the main part of the cache.
1100 for (int32_t blockIndex = mIndex.Length() - 1; blockIndex >= maxBlocks;
1101 --blockIndex) {
1102 if (IsBlockFree(blockIndex))
1103 continue;
1105 Block* block = &mIndex[blockIndex];
1106 // Try to relocate the block close to other blocks for the first stream.
1107 // There is no point in trying to make it close to other blocks in
1108 // *all* the streams it might belong to.
1109 int32_t destinationBlockIndex =
1110 FindReusableBlock(now, block->mOwners[0].mStream,
1111 block->mOwners[0].mStreamBlock, maxBlocks);
1112 if (destinationBlockIndex < 0) {
1113 // Nowhere to place this overflow block. We won't be able to
1114 // place any more overflow blocks.
1115 break;
1116 }
1118 if (IsBlockFree(destinationBlockIndex) ||
1119 PredictNextUse(now, destinationBlockIndex) > latestPredictedUseForOverflow) {
1120 // Reuse blocks in the main part of the cache that are less useful than
1121 // the least useful overflow blocks
1123 nsresult rv = mFileCache->MoveBlock(blockIndex, destinationBlockIndex);
1125 if (NS_SUCCEEDED(rv)) {
1126 // We successfully copied the file data.
1127 CACHE_LOG(PR_LOG_DEBUG, ("Swapping blocks %d and %d (trimming cache)",
1128 blockIndex, destinationBlockIndex));
1129 // Swapping the block metadata here lets us maintain the
1130 // correct positions in the linked lists
1131 SwapBlocks(blockIndex, destinationBlockIndex);
1132 // Now free the overflowing slot; its data and metadata have been moved to destinationBlockIndex.
1133 CACHE_LOG(PR_LOG_DEBUG, ("Released block %d (trimming cache)", blockIndex));
1134 FreeBlock(blockIndex);
1135 }
1136 } else {
1137 CACHE_LOG(PR_LOG_DEBUG, ("Could not trim cache block %d (destination %d, predicted next use %f, latest predicted use for overflow %f",
1138 blockIndex, destinationBlockIndex,
1139 PredictNextUse(now, destinationBlockIndex).ToSeconds(),
1140 latestPredictedUseForOverflow.ToSeconds()));
1141 }
1142 }
1143 // Try chopping back the array of cache entries and the cache file.
1144 Truncate();
1146 // Count the blocks allocated for readahead of non-seekable streams
1147 // (these blocks can't be freed but we don't want them to monopolize the
1148 // cache)
1149 int32_t nonSeekableReadaheadBlockCount = 0;
1150 for (uint32_t i = 0; i < mStreams.Length(); ++i) {
1151 MediaCacheStream* stream = mStreams[i];
1152 if (!stream->mIsTransportSeekable) {
1153 nonSeekableReadaheadBlockCount += stream->mReadaheadBlocks.GetCount();
1154 }
1155 }
1157 // If freeBlockCount is zero, then compute the latest of
1158 // the predicted next-uses for all blocks
1159 TimeDuration latestNextUse;
1160 if (freeBlockCount == 0) {
1161 int32_t reusableBlock = FindReusableBlock(now, nullptr, 0, maxBlocks);
1162 if (reusableBlock >= 0) {
1163 latestNextUse = PredictNextUse(now, reusableBlock);
1164 }
1165 }
1167 for (uint32_t i = 0; i < mStreams.Length(); ++i) {
1168 actions.AppendElement(NONE);
1170 MediaCacheStream* stream = mStreams[i];
1171 if (stream->mClosed)
1172 continue;
1174 // Figure out where we should be reading from. It's the first
1175 // uncached byte after the current mStreamOffset.
1176 int64_t dataOffset = stream->GetCachedDataEndInternal(stream->mStreamOffset);
1177 MOZ_ASSERT(dataOffset >= 0);
1179 // Compute where we'd actually seek to in order to read at dataOffset
1180 int64_t desiredOffset = dataOffset;
1181 if (stream->mIsTransportSeekable) {
1182 if (desiredOffset > stream->mChannelOffset &&
1183 desiredOffset <= stream->mChannelOffset + SEEK_VS_READ_THRESHOLD) {
1184 // Assume it's more efficient to just keep reading up to the
1185 // desired position instead of trying to seek
1186 desiredOffset = stream->mChannelOffset;
1187 }
1188 } else {
1189 // We can't seek directly to the desired offset...
1190 if (stream->mChannelOffset > desiredOffset) {
1191 // Reading forward won't get us anywhere, we need to go backwards.
1192 // Seek back to 0 (the client will reopen the stream) and then
1193 // read forward.
1194 NS_WARNING("Can't seek backwards, so seeking to 0");
1195 desiredOffset = 0;
1196 // Flush cached blocks out, since if this is a live stream
1197 // the cached data may be completely different next time we
1198 // read it. We have to assume that live streams don't
1199 // advertise themselves as being seekable...
1200 ReleaseStreamBlocks(stream);
1201 } else {
1202 // otherwise reading forward is looking good, so just stay where we
1203 // are and don't trigger a channel seek!
1204 desiredOffset = stream->mChannelOffset;
1205 }
1206 }
1208 // Figure out if we should be reading data now or not. It's amazing
1209 // how complex this is, but each decision is simple enough.
1210 bool enableReading;
1211 if (stream->mStreamLength >= 0 && dataOffset >= stream->mStreamLength) {
1212 // We want data at the end of the stream, where there's nothing to
1213 // read. We don't want to try to read if we're suspended, because that
1214 // might create a new channel and seek unnecessarily (and incorrectly,
1215 // since HTTP doesn't allow seeking to the actual EOF), and we don't want
1216 // to suspend if we're not suspended and already reading at the end of
1217 // the stream, since there just might be more data than the server
1218 // advertised with Content-Length, and we may as well keep reading.
1219 // But we don't want to seek to the end of the stream if we're not
1220 // already there.
1221 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p at end of stream", stream));
1222 enableReading = !stream->mCacheSuspended &&
1223 stream->mStreamLength == stream->mChannelOffset;
1224 } else if (desiredOffset < stream->mStreamOffset) {
1225 // We're reading to try to catch up to where the current stream
1226 // reader wants to be. Better not stop.
1227 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p catching up", stream));
1228 enableReading = true;
1229 } else if (desiredOffset < stream->mStreamOffset + BLOCK_SIZE) {
1230 // The stream reader is waiting for us, or nearly so. Better feed it.
1231 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p feeding reader", stream));
1232 enableReading = true;
1233 } else if (!stream->mIsTransportSeekable &&
1234 nonSeekableReadaheadBlockCount >= maxBlocks*NONSEEKABLE_READAHEAD_MAX) {
1235 // This stream is not seekable and there are already too many blocks
1236 // being cached for readahead for nonseekable streams (which we can't
1237 // free). So stop reading ahead now.
1238 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p throttling non-seekable readahead", stream));
1239 enableReading = false;
1240 } else if (mIndex.Length() > uint32_t(maxBlocks)) {
1241 // We're in the process of bringing the cache size back to the
1242 // desired limit, so don't bring in more data yet
1243 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p throttling to reduce cache size", stream));
1244 enableReading = false;
1245 } else {
1246 TimeDuration predictedNewDataUse = PredictNextUseForIncomingData(stream);
1248 if (stream->mCacheSuspended &&
1249 predictedNewDataUse.ToMilliseconds() > CACHE_POWERSAVE_WAKEUP_LOW_THRESHOLD_MS) {
1250 // Don't need data for a while, so don't bother waking up the stream
1251 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p avoiding wakeup since more data is not needed", stream));
1252 enableReading = false;
1253 } else if (freeBlockCount > 0) {
1254 // Free blocks in the cache, so keep reading
1255 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p reading since there are free blocks", stream));
1256 enableReading = true;
1257 } else if (latestNextUse <= TimeDuration(0)) {
1258 // No reusable blocks, so can't read anything
1259 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p throttling due to no reusable blocks", stream));
1260 enableReading = false;
1261 } else {
1262 // Read ahead if the data we expect to read is more valuable than
1263 // the least valuable block in the main part of the cache
1264 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p predict next data in %f, current worst block is %f",
1265 stream, predictedNewDataUse.ToSeconds(), latestNextUse.ToSeconds()));
1266 enableReading = predictedNewDataUse < latestNextUse;
1267 }
1268 }
1270 if (enableReading) {
1271 for (uint32_t j = 0; j < i; ++j) {
1272 MediaCacheStream* other = mStreams[j];
1273 if (other->mResourceID == stream->mResourceID &&
1274 !other->mClient->IsSuspended() &&
1275 other->mChannelOffset/BLOCK_SIZE == desiredOffset/BLOCK_SIZE) {
1276 // This block is already going to be read by the other stream.
1277 // So don't try to read it from this stream as well.
1278 enableReading = false;
1279 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p waiting on same block (%lld) from stream %p",
1280 stream, desiredOffset/BLOCK_SIZE, other));
1281 break;
1282 }
1283 }
1284 }
1286 if (stream->mChannelOffset != desiredOffset && enableReading) {
1287 // We need to seek now.
1288 NS_ASSERTION(stream->mIsTransportSeekable || desiredOffset == 0,
1289 "Trying to seek in a non-seekable stream!");
1290 // Round seek offset down to the start of the block. This is essential
1291 // because we don't want to think we have part of a block already
1292 // in mPartialBlockBuffer.
1293 stream->mChannelOffset = (desiredOffset/BLOCK_SIZE)*BLOCK_SIZE;
1294 actions[i] = stream->mCacheSuspended ? SEEK_AND_RESUME : SEEK;
1295 } else if (enableReading && stream->mCacheSuspended) {
1296 actions[i] = RESUME;
1297 } else if (!enableReading && !stream->mCacheSuspended) {
1298 actions[i] = SUSPEND;
1299 }
1300 }
1301 #ifdef DEBUG
1302 mInUpdate = false;
1303 #endif
1304 }
1306 // Update the channel state without holding our cache lock. While we're
1307 // doing this, decoder threads may be running and seeking, reading or changing
1308 // other cache state. That's OK, they'll trigger new Update events and we'll
1309 // get back here and revise our decisions. The important thing here is that
1310 // performing these actions only depends on mChannelOffset and
1311 // the action, which can only be written by the main thread (i.e., this
1312 // thread), so we don't have races here.
1314 // First, update the mCacheSuspended/mChannelEnded flags so that they're all correct
1315 // when we fire our CacheClient commands below. Those commands can rely on these flags
1316 // being set correctly for all streams.
1317 for (uint32_t i = 0; i < mStreams.Length(); ++i) {
1318 MediaCacheStream* stream = mStreams[i];
1319 switch (actions[i]) {
1320 case SEEK:
1321 case SEEK_AND_RESUME:
1322 stream->mCacheSuspended = false;
1323 stream->mChannelEnded = false;
1324 break;
1325 case RESUME:
1326 stream->mCacheSuspended = false;
1327 break;
1328 case SUSPEND:
1329 stream->mCacheSuspended = true;
1330 break;
1331 default:
1332 break;
1333 }
1334 stream->mHasHadUpdate = true;
1335 }
1337 for (uint32_t i = 0; i < mStreams.Length(); ++i) {
1338 MediaCacheStream* stream = mStreams[i];
1339 nsresult rv;
1340 switch (actions[i]) {
1341 case SEEK:
1342 case SEEK_AND_RESUME:
1343 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p CacheSeek to %lld (resume=%d)", stream,
1344 (long long)stream->mChannelOffset, actions[i] == SEEK_AND_RESUME));
1345 rv = stream->mClient->CacheClientSeek(stream->mChannelOffset,
1346 actions[i] == SEEK_AND_RESUME);
1347 break;
1348 case RESUME:
1349 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p Resumed", stream));
1350 rv = stream->mClient->CacheClientResume();
1351 break;
1352 case SUSPEND:
1353 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p Suspended", stream));
1354 rv = stream->mClient->CacheClientSuspend();
1355 break;
1356 default:
1357 rv = NS_OK;
1358 break;
1359 }
1361 if (NS_FAILED(rv)) {
1362 // Close the streams that failed due to error. This will cause all
1363 // client Read and Seek operations on those streams to fail. Blocked
1364 // Reads will also be woken up.
1365 ReentrantMonitorAutoEnter mon(mReentrantMonitor);
1366 stream->CloseInternal(mon);
1367 }
1368 }
1369 }
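// Runnable dispatched by QueueUpdate(); runs on the main thread and calls
// MediaCache::Update() if the global cache still exists.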
1371 class UpdateEvent : public nsRunnable
1372 {
1373 public:
1374 NS_IMETHOD Run()
1375 {
1376 if (gMediaCache) {
1377 gMediaCache->Update();
1378 }
1379 return NS_OK;
1380 }
1381 };
1383 void
1384 MediaCache::QueueUpdate()
1385 {
1386 mReentrantMonitor.AssertCurrentThreadIn();
1388 // Queuing an update while we're in an update raises a high risk of
1389 // triggering endless events
1390 NS_ASSERTION(!mInUpdate,
1391 "Queuing an update while we're in an update");
1392 if (mUpdateQueued)
1393 return;
1394 mUpdateQueued = true;
1395 nsCOMPtr<nsIRunnable> event = new UpdateEvent();
1396 NS_DispatchToMainThread(event);
1397 }
1399 #ifdef DEBUG_VERIFY_CACHE
1400 void
1401 MediaCache::Verify()
1402 {
1403 mReentrantMonitor.AssertCurrentThreadIn();
1405 mFreeBlocks.Verify();
1406 for (uint32_t i = 0; i < mStreams.Length(); ++i) {
1407 MediaCacheStream* stream = mStreams[i];
1408 stream->mReadaheadBlocks.Verify();
1409 stream->mPlayedBlocks.Verify();
1410 stream->mMetadataBlocks.Verify();
1412 // Verify that the readahead blocks are listed in stream block order
1413 int32_t block = stream->mReadaheadBlocks.GetFirstBlock();
1414 int32_t lastStreamBlock = -1;
1415 while (block >= 0) {
1416 uint32_t j = 0;
1417 while (mIndex[block].mOwners[j].mStream != stream) {
1418 ++j;
1419 }
1420 int32_t nextStreamBlock =
1421 int32_t(mIndex[block].mOwners[j].mStreamBlock);
1422 NS_ASSERTION(lastStreamBlock < nextStreamBlock,
1423 "Blocks not increasing in readahead stream");
1424 lastStreamBlock = nextStreamBlock;
1425 block = stream->mReadaheadBlocks.GetNextBlock(block);
1426 }
1427 }
1428 }
1429 #endif
1431 void
1432 MediaCache::InsertReadaheadBlock(BlockOwner* aBlockOwner,
1433 int32_t aBlockIndex)
1434 {
1435 mReentrantMonitor.AssertCurrentThreadIn();
1437 // Find the last block whose stream block is before aBlockIndex's
1438 // stream block, and insert after it
1439 MediaCacheStream* stream = aBlockOwner->mStream;
1440 int32_t readaheadIndex = stream->mReadaheadBlocks.GetLastBlock();
1441 while (readaheadIndex >= 0) {
1442 BlockOwner* bo = GetBlockOwner(readaheadIndex, stream);
1443 NS_ASSERTION(bo, "stream must own its blocks");
1444 if (bo->mStreamBlock < aBlockOwner->mStreamBlock) {
1445 stream->mReadaheadBlocks.AddAfter(aBlockIndex, readaheadIndex);
1446 return;
1447 }
1448 NS_ASSERTION(bo->mStreamBlock > aBlockOwner->mStreamBlock,
1449 "Duplicated blocks??");
1450 readaheadIndex = stream->mReadaheadBlocks.GetPrevBlock(readaheadIndex);
1451 }
1453 stream->mReadaheadBlocks.AddFirstBlock(aBlockIndex);
1454 Verify();
1455 }
1457 void
1458 MediaCache::AllocateAndWriteBlock(MediaCacheStream* aStream, const void* aData,
1459 MediaCacheStream::ReadMode aMode)
1460 {
1461 mReentrantMonitor.AssertCurrentThreadIn();
1463 int32_t streamBlockIndex = aStream->mChannelOffset/BLOCK_SIZE;
1465 // Remove all cached copies of this block
1466 ResourceStreamIterator iter(aStream->mResourceID);
1467 while (MediaCacheStream* stream = iter.Next()) {
1468 while (streamBlockIndex >= int32_t(stream->mBlocks.Length())) {
1469 stream->mBlocks.AppendElement(-1);
1470 }
1471 if (stream->mBlocks[streamBlockIndex] >= 0) {
1472 // We no longer want to own this block
1473 int32_t globalBlockIndex = stream->mBlocks[streamBlockIndex];
1474 CACHE_LOG(PR_LOG_DEBUG, ("Released block %d from stream %p block %d(%lld)",
1475 globalBlockIndex, stream, streamBlockIndex, (long long)streamBlockIndex*BLOCK_SIZE));
1476 RemoveBlockOwner(globalBlockIndex, stream);
1477 }
1478 }
1480 // Extend the mBlocks array as necessary
1482 TimeStamp now = TimeStamp::Now();
1483 int32_t blockIndex = FindBlockForIncomingData(now, aStream);
1484 if (blockIndex >= 0) {
1485 FreeBlock(blockIndex);
1487 Block* block = &mIndex[blockIndex];
1488 CACHE_LOG(PR_LOG_DEBUG, ("Allocated block %d to stream %p block %d(%lld)",
1489 blockIndex, aStream, streamBlockIndex, (long long)streamBlockIndex*BLOCK_SIZE));
1491 mFreeBlocks.RemoveBlock(blockIndex);
1493 // Tell each stream using this resource about the new block.
1494 ResourceStreamIterator iter(aStream->mResourceID);
1495 while (MediaCacheStream* stream = iter.Next()) {
1496 BlockOwner* bo = block->mOwners.AppendElement();
1497 if (!bo)
1498 return;
1500 bo->mStream = stream;
1501 bo->mStreamBlock = streamBlockIndex;
1502 bo->mLastUseTime = now;
1503 stream->mBlocks[streamBlockIndex] = blockIndex;
1504 if (streamBlockIndex*BLOCK_SIZE < stream->mStreamOffset) {
1505 bo->mClass = aMode == MediaCacheStream::MODE_PLAYBACK
1506 ? PLAYED_BLOCK : METADATA_BLOCK;
1507 // This must be the most-recently-used block, since we
1508 // marked it as used now (which may be slightly bogus, but we'll
1509 // treat it as used for simplicity).
1510 GetListForBlock(bo)->AddFirstBlock(blockIndex);
1511 Verify();
1512 } else {
1513 // This may not be the latest readahead block, although it usually
1514 // will be. We may have to scan for the right place to insert
1515 // the block in the list.
1516 bo->mClass = READAHEAD_BLOCK;
1517 InsertReadaheadBlock(bo, blockIndex);
1518 }
1519 }
1521 nsresult rv = mFileCache->WriteBlock(blockIndex, reinterpret_cast<const uint8_t*>(aData));
1522 if (NS_FAILED(rv)) {
1523 CACHE_LOG(PR_LOG_DEBUG, ("Released block %d from stream %p block %d(%lld)",
1524 blockIndex, aStream, streamBlockIndex, (long long)streamBlockIndex*BLOCK_SIZE));
1525 FreeBlock(blockIndex);
1526 }
1527 }
1529 // Queue an Update since the cache state has changed (for example
1530 // we might want to stop loading because the cache is full)
1531 QueueUpdate();
1532 }
1534 void
1535 MediaCache::OpenStream(MediaCacheStream* aStream)
1536 {
1537 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1539 ReentrantMonitorAutoEnter mon(mReentrantMonitor);
1540 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p opened", aStream));
1541 mStreams.AppendElement(aStream);
1542 aStream->mResourceID = AllocateResourceID();
1544 // Queue an update since a new stream has been opened.
1545 gMediaCache->QueueUpdate();
1546 }
1548 void
1549 MediaCache::ReleaseStream(MediaCacheStream* aStream)
1550 {
1551 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1553 ReentrantMonitorAutoEnter mon(mReentrantMonitor);
1554 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p closed", aStream));
1555 mStreams.RemoveElement(aStream);
1557 // Update MediaCache again since |mStreams| has changed.
1558 // We need to re-run Update() to ensure streams reading from the same resource
1559 // as the removed stream get a chance to continue reading.
1560 gMediaCache->QueueUpdate();
1561 }
1563 void
1564 MediaCache::ReleaseStreamBlocks(MediaCacheStream* aStream)
1565 {
1566 mReentrantMonitor.AssertCurrentThreadIn();
1568 // XXX scanning the entire stream doesn't seem great, if not much of it
1569 // is cached, but the only easy alternative is to scan the entire cache
1570 // which isn't better
1571 uint32_t length = aStream->mBlocks.Length();
1572 for (uint32_t i = 0; i < length; ++i) {
1573 int32_t blockIndex = aStream->mBlocks[i];
1574 if (blockIndex >= 0) {
1575 CACHE_LOG(PR_LOG_DEBUG, ("Released block %d from stream %p block %d(%lld)",
1576 blockIndex, aStream, i, (long long)i*BLOCK_SIZE));
1577 RemoveBlockOwner(blockIndex, aStream);
1578 }
1579 }
1580 }
1582 void
1583 MediaCache::Truncate()
1584 {
1585 uint32_t end;
1586 for (end = mIndex.Length(); end > 0; --end) {
1587 if (!IsBlockFree(end - 1))
1588 break;
1589 mFreeBlocks.RemoveBlock(end - 1);
1590 }
1592 if (end < mIndex.Length()) {
1593 mIndex.TruncateLength(end);
1594 // XXX We could truncate the cache file here, but we don't seem
1595 // to have a cross-platform API for doing that. At least when all
1596 // streams are closed we shut down the cache, which erases the
1597 // file at that point.
1598 }
1599 }
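// Worked example (editorial sketch, not in the original source): if the index
// holds three blocks and only block 0 is still in use, the loop above removes
// blocks 2 and 1 from mFreeBlocks, stops at the in-use block, and then
// truncates mIndex to length 1. The cache file itself is not shrunk, per the
// XXX note above.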
1601 void
1602 MediaCache::NoteBlockUsage(MediaCacheStream* aStream, int32_t aBlockIndex,
1603 MediaCacheStream::ReadMode aMode,
1604 TimeStamp aNow)
1605 {
1606 mReentrantMonitor.AssertCurrentThreadIn();
1608 if (aBlockIndex < 0) {
1609 // this block is not in the cache yet
1610 return;
1611 }
1613 BlockOwner* bo = GetBlockOwner(aBlockIndex, aStream);
1614 if (!bo) {
1615 // this block is not in the cache yet
1616 return;
1617 }
1619 // The following check has to be <= because the stream offset has
1620 // not yet been updated for the data read from this block
1621 NS_ASSERTION(bo->mStreamBlock*BLOCK_SIZE <= bo->mStream->mStreamOffset,
1622 "Using a block that's behind the read position?");
1624 GetListForBlock(bo)->RemoveBlock(aBlockIndex);
1625 bo->mClass =
1626 (aMode == MediaCacheStream::MODE_METADATA || bo->mClass == METADATA_BLOCK)
1627 ? METADATA_BLOCK : PLAYED_BLOCK;
1628 // Since this is just being used now, it can definitely be at the front
1629 // of mMetadataBlocks or mPlayedBlocks
1630 GetListForBlock(bo)->AddFirstBlock(aBlockIndex);
1631 bo->mLastUseTime = aNow;
1632 Verify();
1633 }
1635 void
1636 MediaCache::NoteSeek(MediaCacheStream* aStream, int64_t aOldOffset)
1637 {
1638 mReentrantMonitor.AssertCurrentThreadIn();
1640 if (aOldOffset < aStream->mStreamOffset) {
1641 // We seeked forward. Convert blocks from readahead to played.
1642 // Any readahead block that intersects the seeked-over range must
1643 // be converted.
1644 int32_t blockIndex = aOldOffset/BLOCK_SIZE;
1645 int32_t endIndex =
1646 std::min<int64_t>((aStream->mStreamOffset + BLOCK_SIZE - 1)/BLOCK_SIZE,
1647 aStream->mBlocks.Length());
1648 TimeStamp now = TimeStamp::Now();
1649 while (blockIndex < endIndex) {
1650 int32_t cacheBlockIndex = aStream->mBlocks[blockIndex];
1651 if (cacheBlockIndex >= 0) {
1652 // Marking the block used may not be exactly what we want but
1653 // it's simple
1654 NoteBlockUsage(aStream, cacheBlockIndex, MediaCacheStream::MODE_PLAYBACK,
1655 now);
1656 }
1657 ++blockIndex;
1658 }
1659 } else {
1660 // We seeked backward. Convert from played to readahead.
1661 // Any played block that is entirely after the start of the seeked-over
1662 // range must be converted.
1663 int32_t blockIndex =
1664 (aStream->mStreamOffset + BLOCK_SIZE - 1)/BLOCK_SIZE;
1665 int32_t endIndex =
1666 std::min<int64_t>((aOldOffset + BLOCK_SIZE - 1)/BLOCK_SIZE,
1667 aStream->mBlocks.Length());
1668 while (blockIndex < endIndex) {
1669 int32_t cacheBlockIndex = aStream->mBlocks[endIndex - 1];
1670 if (cacheBlockIndex >= 0) {
1671 BlockOwner* bo = GetBlockOwner(cacheBlockIndex, aStream);
1672 NS_ASSERTION(bo, "Stream doesn't own its blocks?");
1673 if (bo->mClass == PLAYED_BLOCK) {
1674 aStream->mPlayedBlocks.RemoveBlock(cacheBlockIndex);
1675 bo->mClass = READAHEAD_BLOCK;
1676 // Adding this as the first block is sure to be OK since
1677 // this must currently be the earliest readahead block
1678 // (that's why we're proceeding backwards from the end of
1679 // the seeked range to the start)
1680 aStream->mReadaheadBlocks.AddFirstBlock(cacheBlockIndex);
1681 Verify();
1682 }
1683 }
1684 --endIndex;
1685 }
1686 }
1687 }
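// Worked example (editorial sketch, not in the original source; a BLOCK_SIZE of
// 32768 is assumed only for illustration): a forward seek from offset 10000 to
// 70000 gives blockIndex = 0 and endIndex = 3, so any cached blocks 0..2 are
// marked used in MODE_PLAYBACK and become played blocks. A backward seek over
// the same range converts only blocks 2 and 1 (the ones entirely after the new
// offset) back into readahead blocks, processed in reverse order.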
1689 void
1690 MediaCacheStream::NotifyDataLength(int64_t aLength)
1691 {
1692 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1694 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1695 mStreamLength = aLength;
1696 }
1698 void
1699 MediaCacheStream::NotifyDataStarted(int64_t aOffset)
1700 {
1701 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1703 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1704 NS_WARN_IF_FALSE(aOffset == mChannelOffset,
1705 "Server is giving us unexpected offset");
1706 MOZ_ASSERT(aOffset >= 0);
1707 mChannelOffset = aOffset;
1708 if (mStreamLength >= 0) {
1709 // If we started reading at a certain offset, then for sure
1710 // the stream is at least that long.
1711 mStreamLength = std::max(mStreamLength, mChannelOffset);
1712 }
1713 }
1715 bool
1716 MediaCacheStream::UpdatePrincipal(nsIPrincipal* aPrincipal)
1717 {
1718 return nsContentUtils::CombineResourcePrincipals(&mPrincipal, aPrincipal);
1719 }
1721 void
1722 MediaCacheStream::NotifyDataReceived(int64_t aSize, const char* aData,
1723 nsIPrincipal* aPrincipal)
1724 {
1725 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1727 // Update principals before putting the data in the cache. This is important,
1728 // we want to make sure all principals are updated before any consumer
1729 // can see the new data.
1730 // We do this without holding the cache monitor, in case the client wants
1731 // to do something that takes a lock.
1732 {
1733 MediaCache::ResourceStreamIterator iter(mResourceID);
1734 while (MediaCacheStream* stream = iter.Next()) {
1735 if (stream->UpdatePrincipal(aPrincipal)) {
1736 stream->mClient->CacheClientNotifyPrincipalChanged();
1737 }
1738 }
1739 }
1741 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1742 int64_t size = aSize;
1743 const char* data = aData;
1745 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p DataReceived at %lld count=%lld",
1746 this, (long long)mChannelOffset, (long long)aSize));
1748 // We process the data one block (or part of a block) at a time
1749 while (size > 0) {
1750 uint32_t blockIndex = mChannelOffset/BLOCK_SIZE;
1751 int32_t blockOffset = int32_t(mChannelOffset - blockIndex*BLOCK_SIZE);
1752 int32_t chunkSize = std::min<int64_t>(BLOCK_SIZE - blockOffset, size);
1754 // This gets set to something non-null if we have a whole block
1755 // of data to write to the cache
1756 const char* blockDataToStore = nullptr;
1757 ReadMode mode = MODE_PLAYBACK;
1758 if (blockOffset == 0 && chunkSize == BLOCK_SIZE) {
1759 // We received a whole block, so avoid a useless copy through
1760 // mPartialBlockBuffer
1761 blockDataToStore = data;
1762 } else {
1763 if (blockOffset == 0) {
1764 // We've just started filling this buffer so now is a good time
1765 // to clear this flag.
1766 mMetadataInPartialBlockBuffer = false;
1767 }
1768 memcpy(reinterpret_cast<char*>(mPartialBlockBuffer.get()) + blockOffset,
1769 data, chunkSize);
1771 if (blockOffset + chunkSize == BLOCK_SIZE) {
1772 // We completed a block, so let's write it out.
1773 blockDataToStore = reinterpret_cast<char*>(mPartialBlockBuffer.get());
1774 if (mMetadataInPartialBlockBuffer) {
1775 mode = MODE_METADATA;
1776 }
1777 }
1778 }
1780 if (blockDataToStore) {
1781 gMediaCache->AllocateAndWriteBlock(this, blockDataToStore, mode);
1782 }
1784 mChannelOffset += chunkSize;
1785 size -= chunkSize;
1786 data += chunkSize;
1787 }
1789 MediaCache::ResourceStreamIterator iter(mResourceID);
1790 while (MediaCacheStream* stream = iter.Next()) {
1791 if (stream->mStreamLength >= 0) {
1792 // The stream is at least as long as what we've read
1793 stream->mStreamLength = std::max(stream->mStreamLength, mChannelOffset);
1794 }
1795 stream->mClient->CacheClientNotifyDataReceived();
1796 }
1798 // Notify in case there's a waiting reader
1799 // XXX it would be fairly easy to optimize things a lot more to
1800 // avoid waking up reader threads unnecessarily
1801 mon.NotifyAll();
1802 }
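// Worked example (editorial sketch, not in the original source; a BLOCK_SIZE of
// 32768 is assumed only for illustration): a 50000-byte delivery arriving with
// mChannelOffset == 30000 is split by the loop above into a 2768-byte chunk
// that completes block 0 (flushed from mPartialBlockBuffer), a full 32768-byte
// chunk for block 1 (written directly from the network buffer), and a
// 14464-byte chunk that starts block 2 and stays in mPartialBlockBuffer until
// more data or a flush arrives.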
1804 void
1805 MediaCacheStream::FlushPartialBlockInternal(bool aNotifyAll)
1806 {
1807 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1809 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1811 int32_t blockOffset = int32_t(mChannelOffset%BLOCK_SIZE);
1812 if (blockOffset > 0) {
1813 CACHE_LOG(PR_LOG_DEBUG,
1814 ("Stream %p writing partial block: [%d] bytes; "
1815 "mStreamOffset [%lld] mChannelOffset[%lld] mStreamLength [%lld] "
1816 "notifying: [%s]",
1817 this, blockOffset, mStreamOffset, mChannelOffset, mStreamLength,
1818 aNotifyAll ? "yes" : "no"));
1820 // Write back the partial block
1821 memset(reinterpret_cast<char*>(mPartialBlockBuffer.get()) + blockOffset, 0,
1822 BLOCK_SIZE - blockOffset);
1823 gMediaCache->AllocateAndWriteBlock(this, mPartialBlockBuffer.get(),
1824 mMetadataInPartialBlockBuffer ? MODE_METADATA : MODE_PLAYBACK);
1825 if (aNotifyAll) {
1826 // Wake up readers who may be waiting for this data
1827 mon.NotifyAll();
1828 }
1829 }
1830 }
1832 void
1833 MediaCacheStream::FlushPartialBlock()
1834 {
1835 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1837 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1839 // Write the current partial block to memory.
1840 // Note: This writes a full block, so if data is not at the end of the
1841 // stream, the decoder must subsequently choose correct start and end offsets
1842 // for reading/seeking.
1843 FlushPartialBlockInternal(false);
1845 gMediaCache->QueueUpdate();
1846 }
1848 void
1849 MediaCacheStream::NotifyDataEnded(nsresult aStatus)
1850 {
1851 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1853 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1855 if (NS_FAILED(aStatus)) {
1856 // Disconnect from other streams sharing our resource, since they
1857 // should continue trying to load. Our load might have been deliberately
1858 // canceled and that shouldn't affect other streams.
1859 mResourceID = gMediaCache->AllocateResourceID();
1860 }
1862 FlushPartialBlockInternal(true);
1864 if (!mDidNotifyDataEnded) {
1865 MediaCache::ResourceStreamIterator iter(mResourceID);
1866 while (MediaCacheStream* stream = iter.Next()) {
1867 if (NS_SUCCEEDED(aStatus)) {
1868 // We read the whole stream, so remember the true length
1869 stream->mStreamLength = mChannelOffset;
1870 }
1871 NS_ASSERTION(!stream->mDidNotifyDataEnded, "Stream already ended!");
1872 stream->mDidNotifyDataEnded = true;
1873 stream->mNotifyDataEndedStatus = aStatus;
1874 stream->mClient->CacheClientNotifyDataEnded(aStatus);
1875 }
1876 }
1878 mChannelEnded = true;
1879 gMediaCache->QueueUpdate();
1880 }
1882 MediaCacheStream::~MediaCacheStream()
1883 {
1884 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1885 NS_ASSERTION(!mPinCount, "Unbalanced Pin");
1887 if (gMediaCache) {
1888 NS_ASSERTION(mClosed, "Stream was not closed");
1889 gMediaCache->ReleaseStream(this);
1890 MediaCache::MaybeShutdown();
1891 }
1892 }
1894 void
1895 MediaCacheStream::SetTransportSeekable(bool aIsTransportSeekable)
1896 {
1897 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1898 NS_ASSERTION(mIsTransportSeekable || aIsTransportSeekable ||
1899 mChannelOffset == 0, "channel offset must be zero when we become non-seekable");
1900 mIsTransportSeekable = aIsTransportSeekable;
1901 // Queue an Update since we may change our strategy for dealing
1902 // with this stream
1903 gMediaCache->QueueUpdate();
1904 }
1906 bool
1907 MediaCacheStream::IsTransportSeekable()
1908 {
1909 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1910 return mIsTransportSeekable;
1911 }
1913 bool
1914 MediaCacheStream::AreAllStreamsForResourceSuspended()
1915 {
1916 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1917 MediaCache::ResourceStreamIterator iter(mResourceID);
1918 // Look for a stream that's able to read the data we need
1919 int64_t dataOffset = -1;
1920 while (MediaCacheStream* stream = iter.Next()) {
1921 if (stream->mCacheSuspended || stream->mChannelEnded || stream->mClosed) {
1922 continue;
1923 }
1924 if (dataOffset < 0) {
1925 dataOffset = GetCachedDataEndInternal(mStreamOffset);
1926 }
1927 // Ignore streams that are reading beyond the data we need
1928 if (stream->mChannelOffset > dataOffset) {
1929 continue;
1930 }
1931 return false;
1932 }
1934 return true;
1935 }
1937 void
1938 MediaCacheStream::Close()
1939 {
1940 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1942 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1943 CloseInternal(mon);
1944 // Queue an Update since we may have created more free space. Don't do
1945 // it from CloseInternal since that gets called by Update() itself
1946 // sometimes, and we try not to queue updates from Update().
1947 gMediaCache->QueueUpdate();
1948 }
1950 void
1951 MediaCacheStream::EnsureCacheUpdate()
1952 {
1953 if (mHasHadUpdate)
1954 return;
1955 gMediaCache->Update();
1956 }
1958 void
1959 MediaCacheStream::CloseInternal(ReentrantMonitorAutoEnter& aReentrantMonitor)
1960 {
1961 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
1963 if (mClosed)
1964 return;
1965 mClosed = true;
1966 gMediaCache->ReleaseStreamBlocks(this);
1967 // Wake up any blocked readers
1968 aReentrantMonitor.NotifyAll();
1969 }
1971 void
1972 MediaCacheStream::Pin()
1973 {
1974 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1975 ++mPinCount;
1976 // Queue an Update since we may no longer want to read more into the
1977 // cache, if this stream's blocks have become non-evictable
1978 gMediaCache->QueueUpdate();
1979 }
1981 void
1982 MediaCacheStream::Unpin()
1983 {
1984 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1985 NS_ASSERTION(mPinCount > 0, "Unbalanced Unpin");
1986 --mPinCount;
1987 // Queue an Update since we may be able to read more into the
1988 // cache, if this stream's blocks have become evictable
1989 gMediaCache->QueueUpdate();
1990 }
1992 int64_t
1993 MediaCacheStream::GetLength()
1994 {
1995 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
1996 return mStreamLength;
1997 }
1999 int64_t
2000 MediaCacheStream::GetNextCachedData(int64_t aOffset)
2001 {
2002 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2003 return GetNextCachedDataInternal(aOffset);
2004 }
2006 int64_t
2007 MediaCacheStream::GetCachedDataEnd(int64_t aOffset)
2008 {
2009 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2010 return GetCachedDataEndInternal(aOffset);
2011 }
2013 bool
2014 MediaCacheStream::IsDataCachedToEndOfStream(int64_t aOffset)
2015 {
2016 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2017 if (mStreamLength < 0)
2018 return false;
2019 return GetCachedDataEndInternal(aOffset) >= mStreamLength;
2020 }
2022 int64_t
2023 MediaCacheStream::GetCachedDataEndInternal(int64_t aOffset)
2024 {
2025 gMediaCache->GetReentrantMonitor().AssertCurrentThreadIn();
2026 uint32_t startBlockIndex = aOffset/BLOCK_SIZE;
2027 uint32_t blockIndex = startBlockIndex;
2028 while (blockIndex < mBlocks.Length() && mBlocks[blockIndex] != -1) {
2029 ++blockIndex;
2030 }
2031 int64_t result = blockIndex*BLOCK_SIZE;
2032 if (blockIndex == mChannelOffset/BLOCK_SIZE) {
2033 // The block containing mChannelOffset may be partially read but not
2034 // yet committed to the main cache
2035 result = mChannelOffset;
2036 }
2037 if (mStreamLength >= 0) {
2038 // The last block in the cache may only be partially valid, so limit
2039 // the cached range to the stream length
2040 result = std::min(result, mStreamLength);
2041 }
2042 return std::max(result, aOffset);
2043 }
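// Worked example (editorial sketch, not in the original source; a BLOCK_SIZE of
// 32768 is assumed only for illustration): for aOffset == 40000, startBlockIndex
// is 1; if blocks 1 and 2 are cached but block 3 is not, the scan stops at
// blockIndex 3 and result is 98304. If the channel happens to be partway
// through filling block 3, result is extended to mChannelOffset, and it is
// always clamped to mStreamLength when the length is known.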
2045 int64_t
2046 MediaCacheStream::GetNextCachedDataInternal(int64_t aOffset)
2047 {
2048 gMediaCache->GetReentrantMonitor().AssertCurrentThreadIn();
2049 if (aOffset == mStreamLength)
2050 return -1;
2052 uint32_t startBlockIndex = aOffset/BLOCK_SIZE;
2053 uint32_t channelBlockIndex = mChannelOffset/BLOCK_SIZE;
2055 if (startBlockIndex == channelBlockIndex &&
2056 aOffset < mChannelOffset) {
2057 // The block containing mChannelOffset is partially read, but not
2058 // yet committed to the main cache. aOffset lies in the partially
2059 // read portion, thus it is effectively cached.
2060 return aOffset;
2061 }
2063 if (startBlockIndex >= mBlocks.Length())
2064 return -1;
2066 // Is the current block cached?
2067 if (mBlocks[startBlockIndex] != -1)
2068 return aOffset;
2070 // Scan forward for the next block that is cached or being filled by the channel
2071 bool hasPartialBlock = (mChannelOffset % BLOCK_SIZE) != 0;
2072 uint32_t blockIndex = startBlockIndex + 1;
2073 while (true) {
2074 if ((hasPartialBlock && blockIndex == channelBlockIndex) ||
2075 (blockIndex < mBlocks.Length() && mBlocks[blockIndex] != -1)) {
2076 // We're either at the incoming channel block, which has data in it,
2077 // or at a cached block. Return the offset of the block's start.
2078 return blockIndex * BLOCK_SIZE;
2079 }
2081 // No more cached blocks?
2082 if (blockIndex >= mBlocks.Length())
2083 return -1;
2085 ++blockIndex;
2086 }
2088 NS_NOTREACHED("Should return in loop");
2089 return -1;
2090 }
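// Worked example (editorial sketch, not in the original source; a BLOCK_SIZE of
// 32768 is assumed only for illustration): for aOffset == 100 in an uncached
// block 0, with mBlocks[1] == -1 and mBlocks[2] cached, the loop above returns
// 65536, the start of block 2. If neither a cached block nor the block the
// channel is currently filling is found before the end of mBlocks, the result
// is -1.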
2092 void
2093 MediaCacheStream::SetReadMode(ReadMode aMode)
2094 {
2095 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2096 if (aMode == mCurrentMode)
2097 return;
2098 mCurrentMode = aMode;
2099 gMediaCache->QueueUpdate();
2100 }
2102 void
2103 MediaCacheStream::SetPlaybackRate(uint32_t aBytesPerSecond)
2104 {
2105 NS_ASSERTION(aBytesPerSecond > 0, "Zero playback rate not allowed");
2106 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2107 if (aBytesPerSecond == mPlaybackBytesPerSecond)
2108 return;
2109 mPlaybackBytesPerSecond = aBytesPerSecond;
2110 gMediaCache->QueueUpdate();
2111 }
2113 nsresult
2114 MediaCacheStream::Seek(int32_t aWhence, int64_t aOffset)
2115 {
2116 NS_ASSERTION(!NS_IsMainThread(), "Don't call on main thread");
2118 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2119 if (mClosed)
2120 return NS_ERROR_FAILURE;
2122 int64_t oldOffset = mStreamOffset;
2123 int64_t newOffset = mStreamOffset;
2124 switch (aWhence) {
2125 case PR_SEEK_END:
2126 if (mStreamLength < 0)
2127 return NS_ERROR_FAILURE;
2128 newOffset = mStreamLength + aOffset;
2129 break;
2130 case PR_SEEK_CUR:
2131 newOffset += aOffset;
2132 break;
2133 case PR_SEEK_SET:
2134 newOffset = aOffset;
2135 break;
2136 default:
2137 NS_ERROR("Unknown whence");
2138 return NS_ERROR_FAILURE;
2139 }
2141 if (newOffset < 0)
2142 return NS_ERROR_FAILURE;
2143 mStreamOffset = newOffset;
2145 CACHE_LOG(PR_LOG_DEBUG, ("Stream %p Seek to %lld", this, (long long)mStreamOffset));
2146 gMediaCache->NoteSeek(this, oldOffset);
2148 gMediaCache->QueueUpdate();
2149 return NS_OK;
2150 }
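// Worked example (editorial sketch, not in the original source): assuming a
// known stream length, Seek(PR_SEEK_END, -1000) positions the stream at
// mStreamLength - 1000, while any PR_SEEK_END seek with an unknown length
// (mStreamLength < 0) fails with NS_ERROR_FAILURE. Every successful seek
// reclassifies blocks via NoteSeek and queues an Update so the cache can
// rethink what to read next.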
2152 int64_t
2153 MediaCacheStream::Tell()
2154 {
2155 NS_ASSERTION(!NS_IsMainThread(), "Don't call on main thread");
2157 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2158 return mStreamOffset;
2159 }
2161 nsresult
2162 MediaCacheStream::Read(char* aBuffer, uint32_t aCount, uint32_t* aBytes)
2163 {
2164 NS_ASSERTION(!NS_IsMainThread(), "Don't call on main thread");
2166 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2167 if (mClosed)
2168 return NS_ERROR_FAILURE;
2170 uint32_t count = 0;
2171 // Read one block (or part of a block) at a time
2172 while (count < aCount) {
2173 uint32_t streamBlock = uint32_t(mStreamOffset/BLOCK_SIZE);
2174 uint32_t offsetInStreamBlock =
2175 uint32_t(mStreamOffset - streamBlock*BLOCK_SIZE);
2176 int64_t size = std::min(aCount - count, BLOCK_SIZE - offsetInStreamBlock);
2178 if (mStreamLength >= 0) {
2179 // Don't try to read beyond the end of the stream
2180 int64_t bytesRemaining = mStreamLength - mStreamOffset;
2181 if (bytesRemaining <= 0) {
2182 // Get out of here and return NS_OK
2183 break;
2184 }
2185 size = std::min(size, bytesRemaining);
2186 // Clamp size until 64-bit file size issues are fixed.
2187 size = std::min(size, int64_t(INT32_MAX));
2188 }
2190 int32_t cacheBlock = streamBlock < mBlocks.Length() ? mBlocks[streamBlock] : -1;
2191 if (cacheBlock < 0) {
2192 // We don't have a complete cached block here.
2194 if (count > 0) {
2195 // Some data has been read, so return what we've got instead of
2196 // blocking or trying to find a stream with a partial block.
2197 break;
2198 }
2200 // See if the data is available in the partial cache block of any
2201 // stream reading this resource. We need to do this in case there is
2202 // another stream with this resource that has all the data to the end of
2203 // the stream but the data doesn't end on a block boundary.
2204 MediaCacheStream* streamWithPartialBlock = nullptr;
2205 MediaCache::ResourceStreamIterator iter(mResourceID);
2206 while (MediaCacheStream* stream = iter.Next()) {
2207 if (uint32_t(stream->mChannelOffset/BLOCK_SIZE) == streamBlock &&
2208 mStreamOffset < stream->mChannelOffset) {
2209 streamWithPartialBlock = stream;
2210 break;
2211 }
2212 }
2213 if (streamWithPartialBlock) {
2214 // We can just use the data in mPartialBlockBuffer. In fact we should
2215 // use it rather than waiting for the block to fill and land in
2216 // the cache.
2217 int64_t bytes = std::min<int64_t>(size, streamWithPartialBlock->mChannelOffset - mStreamOffset);
2218 // Clamp bytes until 64-bit file size issues are fixed.
2219 bytes = std::min(bytes, int64_t(INT32_MAX));
2220 NS_ABORT_IF_FALSE(bytes >= 0 && bytes <= aCount, "Bytes out of range.");
2221 memcpy(aBuffer,
2222 reinterpret_cast<char*>(streamWithPartialBlock->mPartialBlockBuffer.get()) + offsetInStreamBlock, bytes);
2223 if (mCurrentMode == MODE_METADATA) {
2224 streamWithPartialBlock->mMetadataInPartialBlockBuffer = true;
2225 }
2226 mStreamOffset += bytes;
2227 count = bytes;
2228 break;
2229 }
2231 // No data has been read yet, so block
2232 mon.Wait();
2233 if (mClosed) {
2234 // We may have successfully read some data, but let's just throw
2235 // that out.
2236 return NS_ERROR_FAILURE;
2237 }
2238 continue;
2239 }
2241 gMediaCache->NoteBlockUsage(this, cacheBlock, mCurrentMode, TimeStamp::Now());
2243 int64_t offset = cacheBlock*BLOCK_SIZE + offsetInStreamBlock;
2244 int32_t bytes;
2245 NS_ABORT_IF_FALSE(size >= 0 && size <= INT32_MAX, "Size out of range.");
2246 nsresult rv = gMediaCache->ReadCacheFile(offset, aBuffer + count, int32_t(size), &bytes);
2247 if (NS_FAILED(rv)) {
2248 if (count == 0)
2249 return rv;
2250 // If we did successfully read some data, may as well return it
2251 break;
2252 }
2253 mStreamOffset += bytes;
2254 count += bytes;
2255 }
2257 if (count > 0) {
2258 // Some data was read, so queue an update since block priorities may
2259 // have changed
2260 gMediaCache->QueueUpdate();
2261 }
2262 CACHE_LOG(PR_LOG_DEBUG,
2263 ("Stream %p Read at %lld count=%d", this, (long long)(mStreamOffset-count), count));
2264 *aBytes = count;
2265 return NS_OK;
2266 }
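// Editorial sketch (hypothetical caller, not in the original source): a
// decoder thread typically pulls data through Read(), which may block inside
// mon.Wait() until data arrives or the stream is closed:
//
//   char buf[4096];
//   uint32_t bytesRead = 0;
//   nsresult rv = stream->Read(buf, sizeof(buf), &bytesRead);
//   // NS_OK with bytesRead == 0 signals end of stream; NS_ERROR_FAILURE
//   // means the stream was closed while waiting.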
2268 nsresult
2269 MediaCacheStream::ReadAt(int64_t aOffset, char* aBuffer,
2270 uint32_t aCount, uint32_t* aBytes)
2271 {
2272 NS_ASSERTION(!NS_IsMainThread(), "Don't call on main thread");
2274 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2275 nsresult rv = Seek(nsISeekableStream::NS_SEEK_SET, aOffset);
2276 if (NS_FAILED(rv)) return rv;
2277 return Read(aBuffer, aCount, aBytes);
2278 }
2280 nsresult
2281 MediaCacheStream::ReadFromCache(char* aBuffer, int64_t aOffset, int64_t aCount)
2282 {
2283 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2284 if (mClosed)
2285 return NS_ERROR_FAILURE;
2287 // Read one block (or part of a block) at a time
2288 uint32_t count = 0;
2289 int64_t streamOffset = aOffset;
2290 while (count < aCount) {
2291 uint32_t streamBlock = uint32_t(streamOffset/BLOCK_SIZE);
2292 uint32_t offsetInStreamBlock =
2293 uint32_t(streamOffset - streamBlock*BLOCK_SIZE);
2294 int64_t size = std::min<int64_t>(aCount - count, BLOCK_SIZE - offsetInStreamBlock);
2296 if (mStreamLength >= 0) {
2297 // Don't try to read beyond the end of the stream
2298 int64_t bytesRemaining = mStreamLength - streamOffset;
2299 if (bytesRemaining <= 0) {
2300 return NS_ERROR_FAILURE;
2301 }
2302 size = std::min(size, bytesRemaining);
2303 // Clamp size until 64-bit file size issues are fixed.
2304 size = std::min(size, int64_t(INT32_MAX));
2305 }
2307 int32_t bytes;
2308 uint32_t channelBlock = uint32_t(mChannelOffset/BLOCK_SIZE);
2309 int32_t cacheBlock = streamBlock < mBlocks.Length() ? mBlocks[streamBlock] : -1;
2310 if (channelBlock == streamBlock && streamOffset < mChannelOffset) {
2311 // We can just use the data in mPartialBlockBuffer. In fact we should
2312 // use it rather than waiting for the block to fill and land in
2313 // the cache.
2314 // Clamp bytes until 64-bit file size issues are fixed.
2315 int64_t toCopy = std::min<int64_t>(size, mChannelOffset - streamOffset);
2316 bytes = std::min(toCopy, int64_t(INT32_MAX));
2317 NS_ABORT_IF_FALSE(bytes >= 0 && bytes <= toCopy, "Bytes out of range.");
2318 memcpy(aBuffer + count,
2319 reinterpret_cast<char*>(mPartialBlockBuffer.get()) + offsetInStreamBlock, bytes);
2320 } else {
2321 if (cacheBlock < 0) {
2322 // We expect all blocks to be cached! Fail!
2323 return NS_ERROR_FAILURE;
2324 }
2325 int64_t offset = cacheBlock*BLOCK_SIZE + offsetInStreamBlock;
2326 NS_ABORT_IF_FALSE(size >= 0 && size <= INT32_MAX, "Size out of range.");
2327 nsresult rv = gMediaCache->ReadCacheFile(offset, aBuffer + count, int32_t(size), &bytes);
2328 if (NS_FAILED(rv)) {
2329 return rv;
2330 }
2331 }
2332 streamOffset += bytes;
2333 count += bytes;
2334 }
2336 return NS_OK;
2337 }
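// Editorial sketch (hypothetical usage, not in the original source): unlike
// Read(), ReadFromCache() never blocks; it fails if any required block is
// missing, so a caller would normally confirm availability first, e.g.:
//
//   if (stream->GetCachedDataEnd(offset) >= offset + length) {
//     nsresult rv = stream->ReadFromCache(buffer, offset, length);
//   }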
2339 nsresult
2340 MediaCacheStream::Init()
2341 {
2342 NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
2344 if (mInitialized)
2345 return NS_OK;
2347 InitMediaCache();
2348 if (!gMediaCache)
2349 return NS_ERROR_FAILURE;
2350 gMediaCache->OpenStream(this);
2351 mInitialized = true;
2352 return NS_OK;
2353 }
2355 nsresult
2356 MediaCacheStream::InitAsClone(MediaCacheStream* aOriginal)
2357 {
2358 if (!aOriginal->IsAvailableForSharing())
2359 return NS_ERROR_FAILURE;
2361 if (mInitialized)
2362 return NS_OK;
2364 nsresult rv = Init();
2365 if (NS_FAILED(rv))
2366 return rv;
2367 mResourceID = aOriginal->mResourceID;
2369 // Grab cache blocks from aOriginal as readahead blocks for our stream
2370 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2372 mPrincipal = aOriginal->mPrincipal;
2373 mStreamLength = aOriginal->mStreamLength;
2374 mIsTransportSeekable = aOriginal->mIsTransportSeekable;
2376 // Cloned streams start out suspended, since no channel is open
2377 // for a clone at first.
2378 mCacheSuspended = true;
2379 mChannelEnded = true;
2381 if (aOriginal->mDidNotifyDataEnded) {
2382 mNotifyDataEndedStatus = aOriginal->mNotifyDataEndedStatus;
2383 mDidNotifyDataEnded = true;
2384 mClient->CacheClientNotifyDataEnded(mNotifyDataEndedStatus);
2385 }
2387 for (uint32_t i = 0; i < aOriginal->mBlocks.Length(); ++i) {
2388 int32_t cacheBlockIndex = aOriginal->mBlocks[i];
2389 if (cacheBlockIndex < 0)
2390 continue;
2392 while (i >= mBlocks.Length()) {
2393 mBlocks.AppendElement(-1);
2394 }
2395 // Every block is a readahead block for the clone because the clone's initial
2396 // stream offset is zero
2397 gMediaCache->AddBlockOwnerAsReadahead(cacheBlockIndex, this, i);
2398 }
2400 return NS_OK;
2401 }
2403 nsresult MediaCacheStream::GetCachedRanges(nsTArray<MediaByteRange>& aRanges)
2404 {
2405 // Take the monitor, so that the cached data ranges can't grow while we're
2406 // trying to loop over them.
2407 ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
2409 // We must be pinned while running this, otherwise the cached data ranges may
2410 // shrink while we're trying to loop over them.
2411 NS_ASSERTION(mPinCount > 0, "Must be pinned");
2413 int64_t startOffset = GetNextCachedData(0);
2414 while (startOffset >= 0) {
2415 int64_t endOffset = GetCachedDataEnd(startOffset);
2416 NS_ASSERTION(startOffset < endOffset, "Buffered range must end after its start");
2417 // Bytes in the range [startOffset, endOffset) are cached.
2418 aRanges.AppendElement(MediaByteRange(startOffset, endOffset));
2419 startOffset = GetNextCachedData(endOffset);
2420 NS_ASSERTION(startOffset == -1 || startOffset > endOffset,
2421 "Must have advanced to start of next range, or hit end of stream");
2422 }
2423 return NS_OK;
2424 }
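// Editorial sketch (hypothetical usage, not in the original source): callers
// must hold a pin so the cached data ranges cannot shrink while being
// enumerated:
//
//   stream->Pin();
//   nsTArray<MediaByteRange> ranges;
//   stream->GetCachedRanges(ranges);
//   stream->Unpin();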
2426 } // namespace mozilla