/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef MediaCache_h_
#define MediaCache_h_

#include "nsTArray.h"
#include "nsCOMPtr.h"
#include "nsHashKeys.h"
#include "nsTHashtable.h"

class nsIPrincipal;

namespace mozilla {
// defined in MediaResource.h
class ChannelMediaResource;
class MediaByteRange;
class MediaResource;
class ReentrantMonitorAutoEnter;

/**
 * Media applications want fast, "on demand" random access to media data,
 * for pausing, seeking, etc. But we are primarily interested
 * in transporting media data using HTTP over the Internet, which has
 * high latency to open a connection, requires a new connection for every
 * seek, may not even support seeking on some connections (especially
 * live streams), and uses a push model --- data comes from the server
 * and you don't have much control over the rate. Also, transferring data
 * over the Internet can be slow and/or unpredictable, so we want to read
 * ahead to buffer and cache as much data as possible.
 *
 * The job of the media cache is to resolve this impedance mismatch.
 * The media cache reads data from Necko channels into file-backed storage,
 * and offers a random-access file-like API to the stream data
 * (MediaCacheStream). Along the way it solves several problems:
 * -- The cache intelligently reads ahead to prefetch data that may be
 *    needed in the future
 * -- The size of the cache is bounded so that we don't fill up
 *    storage with read-ahead data
 * -- Cache replacement is managed globally so that the most valuable
 *    data (across all streams) is retained
 * -- The cache can suspend Necko channels temporarily when their data is
 *    not wanted (yet)
 * -- The cache translates file-like seek requests to HTTP seeks,
 *    including optimizations like not triggering a new seek if it would
 *    be faster to just keep reading until we reach the seek point. The
 *    "seek to EOF" idiom to determine file size is also handled efficiently
 *    (seeking to EOF and then seeking back to the previous offset does not
 *    trigger any Necko activity)
 * -- The cache also handles the case where the server does not support
 *    seeking
 * -- Necko can only send data to the main thread, but MediaCacheStream
 *    can distribute data to any thread
 * -- The cache exposes APIs so clients can detect what data is
 *    currently held
 *
 * Note that although HTTP is the most important transport and we only
 * support transport-level seeking via HTTP byte-ranges, the media cache
 * works with any kind of Necko channels and provides random access to
 * cached data even for, e.g., FTP streams.
 *
 * The media cache is not persistent. It does not currently allow
 * data from one load to be used by other loads, either within the same
 * browser session or across browser sessions. The media cache file
 * is marked "delete on close" so it will automatically disappear in the
 * event of a browser crash or shutdown.
 *
 * The media cache is block-based. Streams are divided into blocks of a
 * fixed size (currently 32KB, see MediaCacheStream::BLOCK_SIZE) and we
 * cache blocks. A single cache contains blocks for all streams.
 *
 * The cache size is controlled by the media.cache_size preference
 * (which is in KB). The default size is 500MB.
 *
 * The replacement policy predicts a "time of next use" for each block
 * in the cache. When we need to free a block, the block with the latest
 * "time of next use" will be evicted. Blocks are divided into
 * different classes, each class having its own predictor:
 * FREE_BLOCK: these blocks are effectively infinitely far in the future;
 * a free block will always be chosen for replacement before other classes
 * of blocks.
 * METADATA_BLOCK: these are blocks that contain data that has been read
 * by the decoder in "metadata mode", e.g. while the decoder is searching
 * the stream during a seek operation. These blocks are managed with an
 * LRU policy; the "time of next use" is predicted to be as far in the
 * future as the last use was in the past.
 * PLAYED_BLOCK: these are blocks that have not been read in "metadata
 * mode", and contain data behind the current decoder read point. (They
 * may not actually have been read by the decoder, if the decoder seeked
 * forward.) These blocks are managed with an LRU policy except that we add
 * REPLAY_DELAY seconds of penalty to their predicted "time of next use",
 * to reflect the uncertainty about whether replay will actually happen
 * or not.
 * READAHEAD_BLOCK: these are blocks that have not been read in
 * "metadata mode" and that are entirely ahead of the current decoder
 * read point. (They may actually have been read by the decoder in the
 * past if the decoder has since seeked backward.) We predict the
 * time of next use for these blocks by assuming steady playback and
 * dividing the number of bytes between the block and the current decoder
 * read point by the decoder's estimate of its playback rate in bytes
 * per second. This ensures that the blocks farthest ahead are considered
 * least valuable; a sketch of this calculation appears below.
 * For efficient prediction of the "latest time of next use", we maintain
 * linked lists of blocks in each class, ordering blocks by time of
 * next use. READAHEAD_BLOCKS have one linked list per stream, since their
 * time of next use depends on stream parameters, but the other lists
 * are global.
 *
 * A block containing a current decoder read point can contain data
 * both behind and ahead of the read point. It will be classified as a
 * PLAYED_BLOCK but we will give it special treatment so it is never
 * evicted --- it actually contains the highest-priority readahead data
 * as well as played data.
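 *
 * As an illustrative sketch only (not the literal implementation), the
 * READAHEAD_BLOCK prediction described above amounts to something like:
 *
 *   // Hypothetical helper, assuming steady playback at aBytesPerSecond
 *   // (see SetPlaybackRate). aBlockOffset and aReadPoint are stream
 *   // offsets in bytes, with aBlockOffset >= aReadPoint for readahead data.
 *   TimeDuration PredictReadaheadNextUse(int64_t aBlockOffset,
 *                                        int64_t aReadPoint,
 *                                        uint32_t aBytesPerSecond)
 *   {
 *     int64_t bytesAhead = aBlockOffset - aReadPoint;
 *     return TimeDuration::FromSeconds(double(bytesAhead) / aBytesPerSecond);
 *   }
 *
 * Blocks farther ahead yield a larger predicted delay, so among readahead
 * blocks the farthest-ahead block is the first candidate for eviction.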
 *
 * "Time of next use" estimates are also used for flow control. When
 * reading ahead we can predict the time of next use for the data that
 * will be read. If the predicted time of next use is later than the
 * prediction for all currently cached blocks, and the cache is full, then
 * we should suspend reading from the Necko channel.
 *
 * Unfortunately suspending the Necko channel can't immediately stop the
 * flow of data from the server. First our desire to suspend has to be
 * transmitted to the server (in practice, Necko stops reading from the
 * socket, which causes the kernel to shrink its advertised TCP receive
 * window size to zero). Then the server can stop sending the data, but
 * we will still receive data roughly corresponding to the product of the
 * link bandwidth and the round-trip latency (see the sketch below). We
 * deal with this by letting the cache overflow temporarily and then
 * trimming it back by moving overflowing blocks back into the body of the
 * cache, replacing less valuable blocks as they become available. We try
 * to avoid simply discarding overflowing readahead data.
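 *
 * As a rough, illustrative sketch (the constants below are assumptions,
 * not measurements), the post-suspend overflow is bounded by the
 * bandwidth-delay product:
 *
 *   // Hypothetical estimate of bytes still in flight after a suspend.
 *   int64_t EstimateOverflowBytes(int64_t aBytesPerSecond, double aRttSeconds)
 *   {
 *     // e.g. 1,000,000 bytes/s * 0.2 s = 200,000 bytes (about six 32KB blocks)
 *     return int64_t(aBytesPerSecond * aRttSeconds);
 *   }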
 *
 * All changes to the actual contents of the cache happen on the main
 * thread, since that's where Necko's notifications happen.
 *
 * The media cache maintains at most one Necko channel for each stream.
 * (In the future it might be advantageous to relax this, e.g. so that a
 * seek to near the end of the file can happen without disturbing
 * the loading of data from the beginning of the file.) The Necko channel
 * is managed through ChannelMediaResource; MediaCache does not
 * depend on Necko directly.
 *
 * Every time something changes that might affect whether we want to
 * read from a Necko channel, or whether we want to seek on the Necko
 * channel --- such as data arriving or data being consumed by the
 * decoder --- we asynchronously trigger MediaCache::Update on the main
 * thread. That method implements most cache policy. It evaluates for
 * each stream whether we want to suspend or resume the stream and what
 * offset we should seek to, if any. It is also responsible for trimming
 * back the cache size to its desired limit by moving overflowing blocks
 * into the main part of the cache.
 *
 * Streams can be opened in non-seekable mode. In non-seekable mode,
 * the cache will only call ChannelMediaResource::CacheClientSeek with
 * a 0 offset. The cache tries hard not to discard readahead data
 * for non-seekable streams, since that could trigger a potentially
 * disastrous re-read of the entire stream. It's up to cache clients
 * to try to avoid requesting seeks on such streams.
 *
 * MediaCache has a single internal monitor for all synchronization.
 * This is treated as the lowest level monitor in the media code. So,
 * we must not acquire any MediaDecoder locks or MediaResource locks
 * while holding the MediaCache lock. But it's OK to hold those locks
 * and then get the MediaCache lock.
 *
 * MediaCache associates a principal with each stream. CacheClientSeek
 * can trigger new HTTP requests; due to redirects to other domains,
 * each HTTP load can return data with a different principal. This
 * principal must be passed to NotifyDataReceived, and MediaCache
 * will detect when different principals are associated with data in the
 * same stream, and replace them with a null principal.
 */
class MediaCache;

/**
 * If the cache fails to initialize then Init will fail, so nonstatic
 * methods of this class can assume gMediaCache is non-null.
 *
 * This class can be directly embedded as a value.
 */
class MediaCacheStream {
public:
  enum {
    // This needs to be a power of two
    BLOCK_SIZE = 32768
  };
  enum ReadMode {
    MODE_METADATA,
    MODE_PLAYBACK
  };

  // aClient provides the underlying transport that the cache will use to
  // read data for this stream.
  MediaCacheStream(ChannelMediaResource* aClient);
  ~MediaCacheStream();

  // Set up this stream with the cache. Can fail on OOM. Exactly one
  // of InitAsClone or Init must be called before any other method on
  // this class. Does nothing if already initialized.
  nsresult Init();

  // Set up this stream with the cache, assuming it's for the same data
  // as the aOriginal stream. Can fail on OOM. Exactly one
  // of InitAsClone or Init must be called before any other method on
  // this class. Does nothing if already initialized.
  nsresult InitAsClone(MediaCacheStream* aOriginal);
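
  // Illustrative sketch only (not an API addition): a second load of the
  // same underlying resource would typically share cached data by cloning,
  // assuming `aNewClient` is a hypothetical client for the new load and
  // `aOriginal` is an already-initialized stream for that resource:
  //
  //   MediaCacheStream clone(aNewClient);
  //   nsresult rv = clone.InitAsClone(aOriginal);
  //   // if rv is a failure code, do not call other methods on `clone`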

  // These are called on the main thread.
  // Tell us whether the stream is seekable or not. Non-seekable streams
  // will always pass 0 for aOffset to CacheClientSeek. This should only
  // be called while the stream is at channel offset 0. Seekability can
  // change during the lifetime of the MediaCacheStream --- every time
  // we do an HTTP load the seekability may be different (and sometimes
  // is, in practice, due to the effects of caching proxies).
  void SetTransportSeekable(bool aIsTransportSeekable);
  // This must be called (and return) before the ChannelMediaResource
  // used to create this MediaCacheStream is deleted.
  void Close();
  // This returns true when the stream has been closed.
  bool IsClosed() const { return mClosed; }
  // Returns true when this stream can be shared by a new resource load.
  bool IsAvailableForSharing() const
  {
    return !mClosed &&
      (!mDidNotifyDataEnded || NS_SUCCEEDED(mNotifyDataEndedStatus));
  }
  // Get the principal for this stream. Anything accessing the contents of
  // this stream must have a principal that subsumes this principal.
  nsIPrincipal* GetCurrentPrincipal() { return mPrincipal; }
  // Ensure a global media cache update has run with this stream present.
  // This ensures the cache has had a chance to suspend or unsuspend this
  // stream. Called only on the main thread. This can change the state of
  // streams, fire notifications, etc.
  void EnsureCacheUpdate();

  // These callbacks are called on the main thread by the client
  // when data has been received via the channel.
  // Tells the cache what the server said the data length is going to be.
  // The actual data length may be greater (we receive more data than
  // specified) or smaller (the stream ends before we reach the given
  // length), because servers can lie. The server's reported data length
  // *and* the actual data length can even vary over time because a
  // misbehaving server may feed us a different stream after each seek
  // operation. So this is really just a hint. The cache may however
  // stop reading (suspend the channel) when it thinks we've read all the
  // data available based on an incorrect reported length. Seeks relative
  // to EOF also depend on the reported length if we haven't managed to
  // read the whole stream yet.
  void NotifyDataLength(int64_t aLength);
  // Notifies the cache that a load has begun. We pass the offset
  // because in some cases the offset might not be what the cache
  // requested. In particular we might unexpectedly start providing
  // data at offset 0. This need not be called if the offset is the
  // offset that the cache requested in
  // ChannelMediaResource::CacheClientSeek. This can be called at any
  // time by the client, not just after a CacheClientSeek.
  void NotifyDataStarted(int64_t aOffset);
  // Notifies the cache that data has been received. The stream already
  // knows the offset because data is received in sequence and
  // the starting offset is known via NotifyDataStarted or because
  // the cache requested the offset in
  // ChannelMediaResource::CacheClientSeek, or because it defaulted to 0.
  // We pass in the principal that was used to load this data.
  void NotifyDataReceived(int64_t aSize, const char* aData,
                          nsIPrincipal* aPrincipal);
  // Notifies the cache that the current bytes should be written to disk.
  // Called on the main thread.
  void FlushPartialBlock();
  // Notifies the cache that the channel has closed with the given status.
  void NotifyDataEnded(nsresult aStatus);
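
  // Illustrative sketch only (not part of this API): a hypothetical
  // main-thread channel listener would drive the notifications above
  // roughly like this, where `stream`, the offsets and the chunk data are
  // placeholders:
  //
  //   stream->NotifyDataStarted(responseOffset);
  //   stream->NotifyDataLength(reportedContentLength);  // just a hint
  //   // for each chunk of data received, in arrival order:
  //   stream->NotifyDataReceived(chunkSize, chunkData, loadPrincipal);
  //   // when the channel closes:
  //   stream->NotifyDataEnded(channelStatus);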

  // These methods can be called on any thread.
  // Cached blocks associated with this stream will not be evicted
  // while the stream is pinned.
  void Pin();
  void Unpin();
  // See the comments above for NotifyDataLength about how the length
  // can vary over time. Returns -1 if no length is known. Returns the
  // reported length if we haven't got any better information. If
  // the stream ended normally we return the length we actually got.
  // If we've successfully read data beyond the originally reported length,
  // we return the end of the data we've read.
  int64_t GetLength();
  // Returns the unique resource ID. Call only on the main thread or while
  // holding the media cache lock.
  int64_t GetResourceID() { return mResourceID; }
  // Returns the end of the bytes starting at the given offset
  // which are in cache.
  int64_t GetCachedDataEnd(int64_t aOffset);
  // Returns the offset of the first byte of cached data at or after aOffset,
  // or -1 if there is no such cached data.
  int64_t GetNextCachedData(int64_t aOffset);
  // Fills aRanges with the ByteRanges representing the data which is currently
  // cached. Locks the media cache while running, to prevent any ranges
  // growing. The stream should be pinned while this runs and while its results
  // are used, to ensure no data is evicted.
  nsresult GetCachedRanges(nsTArray<MediaByteRange>& aRanges);

  // Reads from buffered data only. Will fail if not all data to be read is
  // in the cache. Will not mark blocks as read. Can be called from the main
  // thread. It's the caller's responsibility to wrap the call in a pin/unpin,
  // and also to check that the range they want is cached before calling this.
  nsresult ReadFromCache(char* aBuffer,
                         int64_t aOffset,
                         int64_t aCount);

  // IsDataCachedToEndOfStream returns true if all the data from
  // aOffset to the end of the stream (the server-reported end, if the
  // real end is not known) is in cache. If we know nothing about the
  // end of the stream, this returns false.
  bool IsDataCachedToEndOfStream(int64_t aOffset);
  // The mode is initially MODE_PLAYBACK.
  void SetReadMode(ReadMode aMode);
  // This is the client's estimate of the playback rate assuming
  // the media plays continuously. The cache can't guess this itself
  // because it doesn't know when the decoder was paused, buffering, etc.
  // Do not pass zero.
  void SetPlaybackRate(uint32_t aBytesPerSecond);
  // Returns the last set value of SetTransportSeekable.
  bool IsTransportSeekable();

  // Returns true when all streams for this resource are suspended or their
  // channel has ended.
  bool AreAllStreamsForResourceSuspended();

  // These methods must be called on a different thread from the main
  // thread. They should always be called on the same thread for a given
  // stream.
  // This can fail when aWhence is NS_SEEK_END and no stream length
  // is known.
  nsresult Seek(int32_t aWhence, int64_t aOffset);
  int64_t Tell();
  // *aBytes gets the number of bytes that were actually read. This can
  // be less than aCount. If the first byte of data is not in the cache,
  // this will block until the data is available or the stream is
  // closed, otherwise it won't block.
  nsresult Read(char* aBuffer, uint32_t aCount, uint32_t* aBytes);
  // Seeks to aOffset in the stream then performs a Read operation. See
  // 'Read' for argument and return details.
  nsresult ReadAt(int64_t aOffset, char* aBuffer,
                  uint32_t aCount, uint32_t* aBytes);

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;
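
  // Illustrative sketch only (not part of this API): a caller that wants to
  // copy already-cached bytes without blocking might combine the methods
  // above like this, where `stream`, `offset`, `buffer` and `bufferSize`
  // are placeholders:
  //
  //   stream->Pin();
  //   int64_t end = stream->GetCachedDataEnd(offset);
  //   if (end > offset) {
  //     int64_t count = std::min<int64_t>(end - offset, bufferSize);
  //     // Only the range known to be cached is requested, so ReadFromCache
  //     // should not fail for lack of data.
  //     nsresult rv = stream->ReadFromCache(buffer, offset, count);
  //   }
  //   stream->Unpin();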

private:
  friend class MediaCache;

  /**
   * A doubly-linked list of blocks. Add/Remove/Get methods are all
   * constant time. We declare this here so that a stream can contain a
   * BlockList of its read-ahead blocks. Blocks are referred to by index
   * into the MediaCache::mIndex array.
   *
   * Blocks can belong to more than one list at the same time, because
   * the next/prev pointers are not stored in the block.
   */
  class BlockList {
  public:
    BlockList() : mFirstBlock(-1), mCount(0) {}
    ~BlockList() {
      NS_ASSERTION(mFirstBlock == -1 && mCount == 0,
                   "Destroying non-empty block list");
    }
    void AddFirstBlock(int32_t aBlock);
    void AddAfter(int32_t aBlock, int32_t aBefore);
    void RemoveBlock(int32_t aBlock);
    // Returns the first block in the list, or -1 if empty
    int32_t GetFirstBlock() const { return mFirstBlock; }
    // Returns the last block in the list, or -1 if empty
    int32_t GetLastBlock() const;
    // Returns the next block in the list after aBlock, or -1 if
    // aBlock is the last block
    int32_t GetNextBlock(int32_t aBlock) const;
    // Returns the previous block in the list before aBlock, or -1 if
    // aBlock is the first block
    int32_t GetPrevBlock(int32_t aBlock) const;
    bool IsEmpty() const { return mFirstBlock < 0; }
    int32_t GetCount() const { return mCount; }
    // The contents of aBlockIndex1 and aBlockIndex2 have been swapped
    void NotifyBlockSwapped(int32_t aBlockIndex1, int32_t aBlockIndex2);
#ifdef DEBUG
    // Verify linked-list invariants
    void Verify();
#else
    void Verify() {}
#endif

    size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;

  private:
    struct Entry : public nsUint32HashKey {
      Entry(KeyTypePointer aKey) : nsUint32HashKey(aKey) { }
      Entry(const Entry& toCopy) : nsUint32HashKey(&toCopy.GetKey()),
        mNextBlock(toCopy.mNextBlock), mPrevBlock(toCopy.mPrevBlock) {}

      int32_t mNextBlock;
      int32_t mPrevBlock;
    };
    // Maps a block index to its next/prev pointers in this list.
    nsTHashtable<Entry> mEntries;

    // The index of the first block in the list, or -1 if the list is empty.
    int32_t mFirstBlock;
    // The number of blocks in the list.
    int32_t mCount;
  };
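
  // Illustrative sketch only: traversing a BlockList from first to last
  // (block values are indices into MediaCache::mIndex):
  //
  //   for (int32_t block = list.GetFirstBlock();
  //        block >= 0;
  //        block = list.GetNextBlock(block)) {
  //     // ... inspect cache block `block` ...
  //   }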

  // Returns the end of the bytes starting at the given offset
  // which are in cache.
  // This method assumes that the cache monitor is held and can be called on
  // any thread.
  int64_t GetCachedDataEndInternal(int64_t aOffset);
  // Returns the offset of the first byte of cached data at or after aOffset,
  // or -1 if there is no such cached data.
  // This method assumes that the cache monitor is held and can be called on
  // any thread.
  int64_t GetNextCachedDataInternal(int64_t aOffset);
  // Writes |mPartialBlockBuffer| to disk.
  // Used by |NotifyDataEnded| and |FlushPartialBlock|.
  // If |aNotify| is true, this function will wake up readers who may be
  // waiting on the media cache monitor. Called on the main thread only.
  void FlushPartialBlockInternal(bool aNotify);
  // A helper function to do the work of closing the stream. Assumes
  // that the cache monitor is held. Main thread only.
  // aReentrantMonitor is the ReentrantMonitorAutoEnter wrapper holding the
  // cache monitor. This is used to NotifyAll to wake up threads that might be
  // blocked on reading from this stream.
  void CloseInternal(ReentrantMonitorAutoEnter& aReentrantMonitor);
  // Updates mPrincipal given that data has been received from aPrincipal.
  bool UpdatePrincipal(nsIPrincipal* aPrincipal);

  // These fields are main-thread-only.
  ChannelMediaResource* mClient;
  nsCOMPtr<nsIPrincipal> mPrincipal;
  // Set to true when Init or InitAsClone has been called
  bool mInitialized;
  // Set to true when MediaCache::Update() has finished while this stream
  // was present.
  bool mHasHadUpdate;
  // Set to true when the stream has been closed either explicitly or
  // due to an internal cache error
  bool mClosed;
  // True if CacheClientNotifyDataEnded has been called for this stream.
  bool mDidNotifyDataEnded;

  // The following fields must be written holding the cache's monitor and
  // only on the main thread, thus can be read either on the main thread
  // or while holding the cache's monitor.

  // This is a unique ID representing the resource we're loading.
  // All streams with the same mResourceID are loading the same
  // underlying resource and should share data.
  int64_t mResourceID;
  // The last reported seekability state for the underlying channel
  bool mIsTransportSeekable;
  // True if the cache has suspended our channel because the cache is
  // full and the priority of the data that would be received is lower
  // than the priority of the data already in the cache
  bool mCacheSuspended;
  // True if the channel ended and we haven't seeked it again.
  bool mChannelEnded;
  // The offset where the next data from the channel will arrive
  int64_t mChannelOffset;
  // The reported or discovered length of the data, or -1 if nothing is
  // known
  int64_t mStreamLength;

  // The following fields are protected by the cache's monitor and can be
  // written by any thread.

  // The offset where the reader is positioned in the stream
  int64_t mStreamOffset;
  // For each block in the stream data, maps to the cache entry for the
  // block, or -1 if the block is not cached.
  nsTArray<int32_t> mBlocks;
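
  // Illustrative note (an assumption based on the comment above, not a
  // guarantee of the implementation): the entry consulted for a stream
  // offset would be mBlocks[offset / BLOCK_SIZE], with -1 meaning that
  // block of the stream is not currently cached.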
  // The list of read-ahead blocks, ordered by stream offset; the first
  // block is the earliest in the stream (so the last block will be the
  // least valuable).
  BlockList mReadaheadBlocks;
  // The list of metadata blocks; the first block is the most recently used
  BlockList mMetadataBlocks;
  // The list of played-back blocks; the first block is the most recently used
  BlockList mPlayedBlocks;
  // The last reported estimate of the decoder's playback rate
  uint32_t mPlaybackBytesPerSecond;
  // The number of times this stream has been Pinned without a
  // corresponding Unpin
  uint32_t mPinCount;
  // The status used when we did CacheClientNotifyDataEnded. Only valid
  // when mDidNotifyDataEnded is true.
  nsresult mNotifyDataEndedStatus;
  // The last reported read mode
  ReadMode mCurrentMode;
  // True if some data in mPartialBlockBuffer has been read as metadata
  bool mMetadataInPartialBlockBuffer;

  // The following field is protected by the cache's monitor but is
  // only written on the main thread.

  // Data received for the block containing mChannelOffset. Data needs
  // to wait here so we can write back a complete block. The first
  // mChannelOffset%BLOCK_SIZE bytes have been filled in with good data,
  // the rest are garbage.
  // Use int64_t so that the data is well-aligned.
  // Heap allocate this buffer since the exact power-of-2 size will cause
  // allocation slop when combined with the rest of the object members.
  nsAutoArrayPtr<int64_t> mPartialBlockBuffer;
};

} // namespace mozilla

#endif