Thu, 22 Jan 2015 13:21:57 +0100
Incorporate the changes requested in Mozilla's review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* vim:set ts=4 sw=4 sts=4 cin et: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsCache.h"
#include "nsDiskCacheMap.h"
#include "nsDiskCacheBinding.h"
#include "nsDiskCacheEntry.h"
#include "nsDiskCacheDevice.h"
#include "nsCacheService.h"

#include <string.h>
#include "nsPrintfCString.h"

#include "nsISerializable.h"
#include "nsSerializationHelper.h"

#include "mozilla/MemoryReporting.h"
#include "mozilla/Telemetry.h"
#include "mozilla/VisualEventTracer.h"
#include <algorithm>

using namespace mozilla;

/******************************************************************************
 *  nsDiskCacheMap
 *****************************************************************************/

/**
 *  File operations
 */

nsresult
nsDiskCacheMap::Open(nsIFile * cacheDirectory,
                     nsDiskCache::CorruptCacheInfo * corruptInfo,
                     bool reportCacheCleanTelemetryData)
{
    NS_ENSURE_ARG_POINTER(corruptInfo);

    // Assume we have an unexpected error until we find otherwise.
    *corruptInfo = nsDiskCache::kUnexpectedError;
    NS_ENSURE_ARG_POINTER(cacheDirectory);
    if (mMapFD) return NS_ERROR_ALREADY_INITIALIZED;

    mCacheDirectory = cacheDirectory;   // save a reference for ourselves

    // create nsIFile for _CACHE_MAP_
    nsresult rv;
    nsCOMPtr<nsIFile> file;
    rv = cacheDirectory->Clone(getter_AddRefs(file));
    rv = file->AppendNative(NS_LITERAL_CSTRING("_CACHE_MAP_"));
    NS_ENSURE_SUCCESS(rv, rv);

    // open the file - restricted to user, the data could be confidential
    rv = file->OpenNSPRFileDesc(PR_RDWR | PR_CREATE_FILE, 00600, &mMapFD);
    if (NS_FAILED(rv)) {
        *corruptInfo = nsDiskCache::kOpenCacheMapError;
        NS_WARNING("Could not open cache map file");
        return NS_ERROR_FILE_CORRUPTED;
    }

    bool cacheFilesExist = CacheFilesExist();
    rv = NS_ERROR_FILE_CORRUPTED;  // presume the worst
    uint32_t mapSize = PR_Available(mMapFD);

    if (NS_FAILED(InitCacheClean(cacheDirectory,
                                 corruptInfo,
                                 reportCacheCleanTelemetryData))) {
        // corruptInfo is set in the call to InitCacheClean
        goto error_exit;
    }

    // check size of map file
    if (mapSize == 0) {  // creating a new _CACHE_MAP_

        // block files shouldn't exist if we're creating the _CACHE_MAP_
        if (cacheFilesExist) {
            *corruptInfo = nsDiskCache::kBlockFilesShouldNotExist;
            goto error_exit;
        }

        if (NS_FAILED(CreateCacheSubDirectories())) {
            *corruptInfo = nsDiskCache::kCreateCacheSubdirectories;
            goto error_exit;
        }

        // create the file - initialize in memory
        memset(&mHeader, 0, sizeof(nsDiskCacheHeader));
        mHeader.mVersion = nsDiskCache::kCurrentVersion;
        mHeader.mRecordCount = kMinRecordCount;
        mRecordArray = (nsDiskCacheRecord *)
            PR_CALLOC(mHeader.mRecordCount * sizeof(nsDiskCacheRecord));
        if (!mRecordArray) {
            *corruptInfo = nsDiskCache::kOutOfMemory;
            rv = NS_ERROR_OUT_OF_MEMORY;
            goto error_exit;
        }
    } else if (mapSize >= sizeof(nsDiskCacheHeader)) {  // read existing _CACHE_MAP_

        // if _CACHE_MAP_ exists, so should the block files
        if (!cacheFilesExist) {
            *corruptInfo = nsDiskCache::kBlockFilesShouldExist;
            goto error_exit;
        }

        CACHE_LOG_DEBUG(("CACHE: nsDiskCacheMap::Open [this=%p] reading map", this));

        // read the header
        uint32_t bytesRead = PR_Read(mMapFD, &mHeader, sizeof(nsDiskCacheHeader));
        if (sizeof(nsDiskCacheHeader) != bytesRead) {
            *corruptInfo = nsDiskCache::kHeaderSizeNotRead;
            goto error_exit;
        }
        mHeader.Unswap();

        if (mHeader.mIsDirty) {
            *corruptInfo = nsDiskCache::kHeaderIsDirty;
            goto error_exit;
        }

        if (mHeader.mVersion != nsDiskCache::kCurrentVersion) {
            *corruptInfo = nsDiskCache::kVersionMismatch;
            goto error_exit;
        }

        uint32_t recordArraySize =
            mHeader.mRecordCount * sizeof(nsDiskCacheRecord);
        if (mapSize < recordArraySize + sizeof(nsDiskCacheHeader)) {
            *corruptInfo = nsDiskCache::kRecordsIncomplete;
            goto error_exit;
        }

        // Get the space for the records
        mRecordArray = (nsDiskCacheRecord *) PR_MALLOC(recordArraySize);
        if (!mRecordArray) {
            *corruptInfo = nsDiskCache::kOutOfMemory;
            rv = NS_ERROR_OUT_OF_MEMORY;
            goto error_exit;
        }

        // Read the records
        bytesRead = PR_Read(mMapFD, mRecordArray, recordArraySize);
        if (bytesRead < recordArraySize) {
            *corruptInfo = nsDiskCache::kNotEnoughToRead;
            goto error_exit;
        }

        // Unswap each record
        int32_t total = 0;
        for (int32_t i = 0; i < mHeader.mRecordCount; ++i) {
            if (mRecordArray[i].HashNumber()) {
#if defined(IS_LITTLE_ENDIAN)
                mRecordArray[i].Unswap();
#endif
                total ++;
            }
        }

        // verify entry count
        if (total != mHeader.mEntryCount) {
            *corruptInfo = nsDiskCache::kEntryCountIncorrect;
            goto error_exit;
        }

    } else {
        *corruptInfo = nsDiskCache::kHeaderIncomplete;
        goto error_exit;
    }

    rv = OpenBlockFiles(corruptInfo);
    if (NS_FAILED(rv)) {
        // corruptInfo is set in the call to OpenBlockFiles
        goto error_exit;
    }

    // set dirty bit and flush header
    mHeader.mIsDirty = true;
    rv = FlushHeader();
    if (NS_FAILED(rv)) {
        *corruptInfo = nsDiskCache::kFlushHeaderError;
        goto error_exit;
    }

    Telemetry::Accumulate(Telemetry::HTTP_DISK_CACHE_OVERHEAD,
                          (uint32_t)SizeOfExcludingThis(moz_malloc_size_of));

    *corruptInfo = nsDiskCache::kNotCorrupt;
    return NS_OK;

error_exit:
    (void) Close(false);

    return rv;
}


nsresult
nsDiskCacheMap::Close(bool flush)
{
    nsCacheService::AssertOwnsLock();
    nsresult rv = NS_OK;

    // Cancel any pending cache validation event, the FlushRecords call below
    // will validate the cache.
    if (mCleanCacheTimer) {
        mCleanCacheTimer->Cancel();
    }

    // If cache map file and its block files are still open, close them
    if (mMapFD) {
        // close block files
        rv = CloseBlockFiles(flush);
        if (NS_SUCCEEDED(rv) && flush && mRecordArray) {
            // write the map records
            rv = FlushRecords(false);   // don't bother swapping buckets back
            if (NS_SUCCEEDED(rv)) {
                // clear dirty bit
                mHeader.mIsDirty = false;
                rv = FlushHeader();
            }
        }
        if ((PR_Close(mMapFD) != PR_SUCCESS) && (NS_SUCCEEDED(rv)))
            rv = NS_ERROR_UNEXPECTED;

        mMapFD = nullptr;
    }

    if (mCleanFD) {
        PR_Close(mCleanFD);
        mCleanFD = nullptr;
    }

    PR_FREEIF(mRecordArray);
    PR_FREEIF(mBuffer);
    mBufferSize = 0;
    return rv;
}


nsresult
nsDiskCacheMap::Trim()
{
    nsresult rv, rv2 = NS_OK;
    for (int i=0; i < kNumBlockFiles; ++i) {
        rv = mBlockFile[i].Trim();
        if (NS_FAILED(rv)) rv2 = rv;   // if one or more errors, report at least one
    }
    // Try to shrink the records array
    rv = ShrinkRecords();
    if (NS_FAILED(rv)) rv2 = rv;   // if one or more errors, report at least one
    return rv2;
}


nsresult
nsDiskCacheMap::FlushHeader()
{
    if (!mMapFD) return NS_ERROR_NOT_AVAILABLE;

    // seek to beginning of cache map
    int32_t filePos = PR_Seek(mMapFD, 0, PR_SEEK_SET);
    if (filePos != 0) return NS_ERROR_UNEXPECTED;

    // write the header
    mHeader.Swap();
    int32_t bytesWritten = PR_Write(mMapFD, &mHeader, sizeof(nsDiskCacheHeader));
    mHeader.Unswap();
    if (sizeof(nsDiskCacheHeader) != bytesWritten) {
        return NS_ERROR_UNEXPECTED;
    }

    PRStatus err = PR_Sync(mMapFD);
    if (err != PR_SUCCESS) return NS_ERROR_UNEXPECTED;

    // If we have a clean header then revalidate the cache clean file
    if (!mHeader.mIsDirty) {
        RevalidateCache();
    }

    return NS_OK;
}

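// Writes the in-memory record array to _CACHE_MAP_ right after the header.
// Records are byte-swapped to disk order before writing; when 'unswap' is
// false (as in Close()) the caller doesn't bother restoring the in-memory
// byte order because the array is about to be freed.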
nsresult
nsDiskCacheMap::FlushRecords(bool unswap)
{
    if (!mMapFD) return NS_ERROR_NOT_AVAILABLE;

    // seek to beginning of buckets
    int32_t filePos = PR_Seek(mMapFD, sizeof(nsDiskCacheHeader), PR_SEEK_SET);
    if (filePos != sizeof(nsDiskCacheHeader))
        return NS_ERROR_UNEXPECTED;

#if defined(IS_LITTLE_ENDIAN)
    // Swap each record
    for (int32_t i = 0; i < mHeader.mRecordCount; ++i) {
        if (mRecordArray[i].HashNumber())
            mRecordArray[i].Swap();
    }
#endif

    int32_t recordArraySize = sizeof(nsDiskCacheRecord) * mHeader.mRecordCount;

    int32_t bytesWritten = PR_Write(mMapFD, mRecordArray, recordArraySize);
    if (bytesWritten != recordArraySize)
        return NS_ERROR_UNEXPECTED;

#if defined(IS_LITTLE_ENDIAN)
    if (unswap) {
        // Unswap each record
        for (int32_t i = 0; i < mHeader.mRecordCount; ++i) {
            if (mRecordArray[i].HashNumber())
                mRecordArray[i].Unswap();
        }
    }
#endif

    return NS_OK;
}


/**
 *  Record operations
 */

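// Returns the largest eviction rank in the bucket that is below targetRank,
// or the bucket's overall maximum rank when targetRank is 0.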
uint32_t
nsDiskCacheMap::GetBucketRank(uint32_t bucketIndex, uint32_t targetRank)
{
    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);
    uint32_t rank = 0;

    for (int i = mHeader.mBucketUsage[bucketIndex]-1; i >= 0; i--) {
        if ((rank < records[i].EvictionRank()) &&
            ((targetRank == 0) || (records[i].EvictionRank() < targetRank)))
            rank = records[i].EvictionRank();
    }
    return rank;
}

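// Doubles the record array (capped at mMaxRecordCount) and spreads the
// existing buckets out over the larger array, working from the last bucket
// backwards so no bucket is overwritten before it has been moved.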
nsresult
nsDiskCacheMap::GrowRecords()
{
    if (mHeader.mRecordCount >= mMaxRecordCount)
        return NS_OK;
    CACHE_LOG_DEBUG(("CACHE: GrowRecords\n"));

    // Resize the record array
    int32_t newCount = mHeader.mRecordCount << 1;
    if (newCount > mMaxRecordCount)
        newCount = mMaxRecordCount;
    nsDiskCacheRecord *newArray = (nsDiskCacheRecord *)
        PR_REALLOC(mRecordArray, newCount * sizeof(nsDiskCacheRecord));
    if (!newArray)
        return NS_ERROR_OUT_OF_MEMORY;

    // Space out the buckets
    uint32_t oldRecordsPerBucket = GetRecordsPerBucket();
    uint32_t newRecordsPerBucket = newCount / kBuckets;
    // Work from back to space out each bucket to the new array
    for (int bucketIndex = kBuckets - 1; bucketIndex >= 0; --bucketIndex) {
        // Move bucket
        nsDiskCacheRecord *newRecords = newArray + bucketIndex * newRecordsPerBucket;
        const uint32_t count = mHeader.mBucketUsage[bucketIndex];
        memmove(newRecords,
                newArray + bucketIndex * oldRecordsPerBucket,
                count * sizeof(nsDiskCacheRecord));
        // clear unused records
        memset(newRecords + count, 0,
               (newRecordsPerBucket - count) * sizeof(nsDiskCacheRecord));
    }

    // Set as the new record array
    mRecordArray = newArray;
    mHeader.mRecordCount = newCount;

    InvalidateCache();

    return NS_OK;
}

nsresult
nsDiskCacheMap::ShrinkRecords()
{
    if (mHeader.mRecordCount <= kMinRecordCount)
        return NS_OK;
    CACHE_LOG_DEBUG(("CACHE: ShrinkRecords\n"));

    // Verify if we can shrink the record array: all buckets must be less than
    // 1/2 filled
    uint32_t maxUsage = 0, bucketIndex;
    for (bucketIndex = 0; bucketIndex < kBuckets; ++bucketIndex) {
        if (maxUsage < mHeader.mBucketUsage[bucketIndex])
            maxUsage = mHeader.mBucketUsage[bucketIndex];
    }
    // Determine new bucket size, halve size until maxUsage
    uint32_t oldRecordsPerBucket = GetRecordsPerBucket();
    uint32_t newRecordsPerBucket = oldRecordsPerBucket;
    while (maxUsage < (newRecordsPerBucket >> 1))
        newRecordsPerBucket >>= 1;
    if (newRecordsPerBucket < (kMinRecordCount / kBuckets))
        newRecordsPerBucket = (kMinRecordCount / kBuckets);
    NS_ASSERTION(newRecordsPerBucket <= oldRecordsPerBucket,
                 "ShrinkRecords() can't grow records!");
    if (newRecordsPerBucket == oldRecordsPerBucket)
        return NS_OK;
    // Move the buckets close to each other
    for (bucketIndex = 1; bucketIndex < kBuckets; ++bucketIndex) {
        // Move bucket
        memmove(mRecordArray + bucketIndex * newRecordsPerBucket,
                mRecordArray + bucketIndex * oldRecordsPerBucket,
                newRecordsPerBucket * sizeof(nsDiskCacheRecord));
    }

    // Shrink the record array memory block itself
    uint32_t newCount = newRecordsPerBucket * kBuckets;
    nsDiskCacheRecord* newArray = (nsDiskCacheRecord *)
        PR_REALLOC(mRecordArray, newCount * sizeof(nsDiskCacheRecord));
    if (!newArray)
        return NS_ERROR_OUT_OF_MEMORY;

    // Set as the new record array
    mRecordArray = newArray;
    mHeader.mRecordCount = newCount;

    InvalidateCache();

    return NS_OK;
}

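// Inserts mapRecord into its bucket. If the bucket is full even after an
// attempted GrowRecords(), the entry with the highest eviction rank is
// replaced and returned through oldRecord; otherwise oldRecord's hash number
// stays 0 to signal that nothing was evicted.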
nsresult
nsDiskCacheMap::AddRecord( nsDiskCacheRecord * mapRecord,
                           nsDiskCacheRecord * oldRecord)
{
    CACHE_LOG_DEBUG(("CACHE: AddRecord [%x]\n", mapRecord->HashNumber()));

    const uint32_t hashNumber = mapRecord->HashNumber();
    const uint32_t bucketIndex = GetBucketIndex(hashNumber);
    const uint32_t count = mHeader.mBucketUsage[bucketIndex];

    oldRecord->SetHashNumber(0);  // signify no record

    if (count == GetRecordsPerBucket()) {
        // Ignore failure to grow the record space, we will then reuse old records
        GrowRecords();
    }

    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);
    if (count < GetRecordsPerBucket()) {
        // stick the new record at the end
        records[count] = *mapRecord;
        mHeader.mEntryCount++;
        mHeader.mBucketUsage[bucketIndex]++;
        if (mHeader.mEvictionRank[bucketIndex] < mapRecord->EvictionRank())
            mHeader.mEvictionRank[bucketIndex] = mapRecord->EvictionRank();
        InvalidateCache();
    } else {
        // Find the record with the highest eviction rank
        nsDiskCacheRecord * mostEvictable = &records[0];
        for (int i = count-1; i > 0; i--) {
            if (records[i].EvictionRank() > mostEvictable->EvictionRank())
                mostEvictable = &records[i];
        }
        *oldRecord     = *mostEvictable;    // i == GetRecordsPerBucket(), so
                                            // evict the mostEvictable
        *mostEvictable = *mapRecord;        // replace it with the new record
        // check if we need to update mostEvictable entry in header
        if (mHeader.mEvictionRank[bucketIndex] < mapRecord->EvictionRank())
            mHeader.mEvictionRank[bucketIndex] = mapRecord->EvictionRank();
        if (oldRecord->EvictionRank() >= mHeader.mEvictionRank[bucketIndex])
            mHeader.mEvictionRank[bucketIndex] = GetBucketRank(bucketIndex, 0);
        InvalidateCache();
    }

    NS_ASSERTION(mHeader.mEvictionRank[bucketIndex] == GetBucketRank(bucketIndex, 0),
                 "eviction rank out of sync");
    return NS_OK;
}


nsresult
nsDiskCacheMap::UpdateRecord( nsDiskCacheRecord * mapRecord)
{
    CACHE_LOG_DEBUG(("CACHE: UpdateRecord [%x]\n", mapRecord->HashNumber()));

    const uint32_t hashNumber = mapRecord->HashNumber();
    const uint32_t bucketIndex = GetBucketIndex(hashNumber);
    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);

    for (int i = mHeader.mBucketUsage[bucketIndex]-1; i >= 0; i--) {
        if (records[i].HashNumber() == hashNumber) {
            const uint32_t oldRank = records[i].EvictionRank();

            // stick the new record here
            records[i] = *mapRecord;

            // update eviction rank in header if necessary
            if (mHeader.mEvictionRank[bucketIndex] < mapRecord->EvictionRank())
                mHeader.mEvictionRank[bucketIndex] = mapRecord->EvictionRank();
            else if (mHeader.mEvictionRank[bucketIndex] == oldRank)
                mHeader.mEvictionRank[bucketIndex] = GetBucketRank(bucketIndex, 0);

            InvalidateCache();

            NS_ASSERTION(mHeader.mEvictionRank[bucketIndex] == GetBucketRank(bucketIndex, 0),
                         "eviction rank out of sync");
            return NS_OK;
        }
    }
    NS_NOTREACHED("record not found");
    return NS_ERROR_UNEXPECTED;
}


nsresult
nsDiskCacheMap::FindRecord( uint32_t hashNumber, nsDiskCacheRecord * result)
{
    const uint32_t bucketIndex = GetBucketIndex(hashNumber);
    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);

    for (int i = mHeader.mBucketUsage[bucketIndex]-1; i >= 0; i--) {
        if (records[i].HashNumber() == hashNumber) {
            *result = records[i];    // copy the record
            NS_ASSERTION(result->ValidRecord(), "bad cache map record");
            return NS_OK;
        }
    }
    return NS_ERROR_CACHE_KEY_NOT_FOUND;
}


nsresult
nsDiskCacheMap::DeleteRecord( nsDiskCacheRecord * mapRecord)
{
    CACHE_LOG_DEBUG(("CACHE: DeleteRecord [%x]\n", mapRecord->HashNumber()));

    const uint32_t hashNumber = mapRecord->HashNumber();
    const uint32_t bucketIndex = GetBucketIndex(hashNumber);
    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);
    uint32_t last = mHeader.mBucketUsage[bucketIndex]-1;

    for (int i = last; i >= 0; i--) {
        if (records[i].HashNumber() == hashNumber) {
            // found it, now delete it.
            uint32_t evictionRank = records[i].EvictionRank();
            NS_ASSERTION(evictionRank == mapRecord->EvictionRank(),
                         "evictionRank out of sync");
            // if not the last record, shift last record into opening
            records[i] = records[last];
            records[last].SetHashNumber(0); // clear last record
            mHeader.mBucketUsage[bucketIndex] = last;
            mHeader.mEntryCount--;

            // update eviction rank
            uint32_t bucketIndex = GetBucketIndex(mapRecord->HashNumber());
            if (mHeader.mEvictionRank[bucketIndex] <= evictionRank) {
                mHeader.mEvictionRank[bucketIndex] = GetBucketRank(bucketIndex, 0);
            }

            InvalidateCache();

            NS_ASSERTION(mHeader.mEvictionRank[bucketIndex] ==
                         GetBucketRank(bucketIndex, 0), "eviction rank out of sync");
            return NS_OK;
        }
    }
    return NS_ERROR_UNEXPECTED;
}

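// Calls the visitor for every record in the bucket whose eviction rank is at
// least evictionRank, honouring kDeleteRecordAndContinue and
// kStopVisitingRecords, then fixes up the bucket usage, entry count and
// eviction rank. Returns the visitor's last disposition.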
int32_t
nsDiskCacheMap::VisitEachRecord(uint32_t bucketIndex,
                                nsDiskCacheRecordVisitor * visitor,
                                uint32_t evictionRank)
{
    int32_t rv = kVisitNextRecord;
    uint32_t count = mHeader.mBucketUsage[bucketIndex];
    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);

    // call visitor for each entry (matching any eviction rank)
    for (int i = count-1; i >= 0; i--) {
        if (evictionRank > records[i].EvictionRank()) continue;

        rv = visitor->VisitRecord(&records[i]);
        if (rv == kStopVisitingRecords)
            break;    // Stop visiting records

        if (rv == kDeleteRecordAndContinue) {
            --count;
            records[i] = records[count];
            records[count].SetHashNumber(0);
            InvalidateCache();
        }
    }

    if (mHeader.mBucketUsage[bucketIndex] - count != 0) {
        mHeader.mEntryCount -= mHeader.mBucketUsage[bucketIndex] - count;
        mHeader.mBucketUsage[bucketIndex] = count;
        // recalc eviction rank
        mHeader.mEvictionRank[bucketIndex] = GetBucketRank(bucketIndex, 0);
    }
    NS_ASSERTION(mHeader.mEvictionRank[bucketIndex] ==
                 GetBucketRank(bucketIndex, 0), "eviction rank out of sync");

    return rv;
}


/**
 *  VisitRecords
 *
 *  Visit every record in cache map in the most convenient order
 */
nsresult
nsDiskCacheMap::VisitRecords( nsDiskCacheRecordVisitor * visitor)
{
    for (int bucketIndex = 0; bucketIndex < kBuckets; ++bucketIndex) {
        if (VisitEachRecord(bucketIndex, visitor, 0) == kStopVisitingRecords)
            break;
    }
    return NS_OK;
}


/**
 *  EvictRecords
 *
 *  Just like VisitRecords, but visits the records in order of their eviction rank
 */
nsresult
nsDiskCacheMap::EvictRecords( nsDiskCacheRecordVisitor * visitor)
{
    uint32_t tempRank[kBuckets];
    int bucketIndex = 0;

    // copy eviction rank array
    for (bucketIndex = 0; bucketIndex < kBuckets; ++bucketIndex)
        tempRank[bucketIndex] = mHeader.mEvictionRank[bucketIndex];

    // Maximum number of iterations determined by number of records
    // as a safety limiter for the loop. Use a copy of mHeader.mEntryCount since
    // the value could decrease if some entry is evicted.
    int32_t entryCount = mHeader.mEntryCount;
    for (int n = 0; n < entryCount; ++n) {

        // find bucket with highest eviction rank
        uint32_t rank = 0;
        for (int i = 0; i < kBuckets; ++i) {
            if (rank < tempRank[i]) {
                rank = tempRank[i];
                bucketIndex = i;
            }
        }

        if (rank == 0) break;  // we've examined all the records

        // visit records in bucket with eviction ranks >= target eviction rank
        if (VisitEachRecord(bucketIndex, visitor, rank) == kStopVisitingRecords)
            break;

        // find greatest rank less than 'rank'
        tempRank[bucketIndex] = GetBucketRank(bucketIndex, rank);
    }
    return NS_OK;
}


nsresult
nsDiskCacheMap::OpenBlockFiles(nsDiskCache::CorruptCacheInfo * corruptInfo)
{
    NS_ENSURE_ARG_POINTER(corruptInfo);

    // create nsIFile for block file
    nsCOMPtr<nsIFile> blockFile;
    nsresult rv = NS_OK;
    *corruptInfo = nsDiskCache::kUnexpectedError;

    for (int i = 0; i < kNumBlockFiles; ++i) {
        rv = GetBlockFileForIndex(i, getter_AddRefs(blockFile));
        if (NS_FAILED(rv)) {
            *corruptInfo = nsDiskCache::kCouldNotGetBlockFileForIndex;
            break;
        }

        uint32_t blockSize = GetBlockSizeForIndex(i+1); // +1 to match file selectors 1,2,3
        uint32_t bitMapSize = GetBitMapSizeForIndex(i+1);
        rv = mBlockFile[i].Open(blockFile, blockSize, bitMapSize, corruptInfo);
        if (NS_FAILED(rv)) {
            // corruptInfo was set inside the call to mBlockFile[i].Open
            break;
        }
    }
    // close all files in case of any error
    if (NS_FAILED(rv))
        (void)CloseBlockFiles(false); // we already have an error to report

    return rv;
}


nsresult
nsDiskCacheMap::CloseBlockFiles(bool flush)
{
    nsresult rv, rv2 = NS_OK;
    for (int i=0; i < kNumBlockFiles; ++i) {
        rv = mBlockFile[i].Close(flush);
        if (NS_FAILED(rv)) rv2 = rv;   // if one or more errors, report at least one
    }
    return rv2;
}


bool
nsDiskCacheMap::CacheFilesExist()
{
    nsCOMPtr<nsIFile> blockFile;
    nsresult rv;

    for (int i = 0; i < kNumBlockFiles; ++i) {
        bool exists;
        rv = GetBlockFileForIndex(i, getter_AddRefs(blockFile));
        if (NS_FAILED(rv)) return false;

        rv = blockFile->Exists(&exists);
        if (NS_FAILED(rv) || !exists) return false;
    }

    return true;
}


nsresult
nsDiskCacheMap::CreateCacheSubDirectories()
{
    if (!mCacheDirectory)
        return NS_ERROR_UNEXPECTED;

    for (int32_t index = 0 ; index < 16 ; index++) {
        nsCOMPtr<nsIFile> file;
        nsresult rv = mCacheDirectory->Clone(getter_AddRefs(file));
        if (NS_FAILED(rv))
            return rv;

        rv = file->AppendNative(nsPrintfCString("%X", index));
        if (NS_FAILED(rv))
            return rv;

        rv = file->Create(nsIFile::DIRECTORY_TYPE, 0700);
        if (NS_FAILED(rv))
            return rv;
    }

    return NS_OK;
}


nsDiskCacheEntry *
nsDiskCacheMap::ReadDiskCacheEntry(nsDiskCacheRecord * record)
{
    CACHE_LOG_DEBUG(("CACHE: ReadDiskCacheEntry [%x]\n", record->HashNumber()));

    nsresult rv = NS_ERROR_UNEXPECTED;
    nsDiskCacheEntry * diskEntry = nullptr;
    uint32_t metaFile = record->MetaFile();
    int32_t bytesRead = 0;

    if (!record->MetaLocationInitialized()) return nullptr;

    if (metaFile == 0) {  // entry/metadata stored in separate file
        // open and read the file
        nsCOMPtr<nsIFile> file;
        rv = GetLocalFileForDiskCacheRecord(record,
                                            nsDiskCache::kMetaData,
                                            false,
                                            getter_AddRefs(file));
        NS_ENSURE_SUCCESS(rv, nullptr);

        CACHE_LOG_DEBUG(("CACHE: nsDiskCacheMap::ReadDiskCacheEntry"
                         "[this=%p] reading disk cache entry", this));

        PRFileDesc * fd = nullptr;

        // open the file - restricted to user, the data could be confidential
        rv = file->OpenNSPRFileDesc(PR_RDONLY, 00600, &fd);
        NS_ENSURE_SUCCESS(rv, nullptr);

        int32_t fileSize = PR_Available(fd);
        if (fileSize < 0) {
            // an error occurred. We could call PR_GetError(), but how would that help?
            rv = NS_ERROR_UNEXPECTED;
        } else {
            rv = EnsureBuffer(fileSize);
            if (NS_SUCCEEDED(rv)) {
                bytesRead = PR_Read(fd, mBuffer, fileSize);
                if (bytesRead < fileSize) {
                    rv = NS_ERROR_UNEXPECTED;
                }
            }
        }
        PR_Close(fd);
        NS_ENSURE_SUCCESS(rv, nullptr);

    } else if (metaFile < (kNumBlockFiles + 1)) {
        // entry/metadata stored in cache block file

        // allocate buffer
        uint32_t blockCount = record->MetaBlockCount();
        bytesRead = blockCount * GetBlockSizeForIndex(metaFile);

        rv = EnsureBuffer(bytesRead);
        NS_ENSURE_SUCCESS(rv, nullptr);

        // read diskEntry, note when the blocks are at the end of file,
        // bytesRead may be less than blockSize*blockCount.
        // But the bytesRead should at least agree with the real disk entry size.
        rv = mBlockFile[metaFile - 1].ReadBlocks(mBuffer,
                                                 record->MetaStartBlock(),
                                                 blockCount,
                                                 &bytesRead);
        NS_ENSURE_SUCCESS(rv, nullptr);
    }
    diskEntry = (nsDiskCacheEntry *)mBuffer;
    diskEntry->Unswap();    // disk to memory
    // Check if calculated size agrees with bytesRead
    if (bytesRead < 0 || (uint32_t)bytesRead < diskEntry->Size())
        return nullptr;

    // Return the buffer containing the diskEntry structure
    return diskEntry;
}


/**
 *  CreateDiskCacheEntry(nsCacheEntry * entry)
 *
 *  Prepare an nsCacheEntry for writing to disk
 */
nsDiskCacheEntry *
nsDiskCacheMap::CreateDiskCacheEntry(nsDiskCacheBinding * binding,
                                     uint32_t * aSize)
{
    nsCacheEntry * entry = binding->mCacheEntry;
    if (!entry) return nullptr;

    // Store security info, if it is serializable
    nsCOMPtr<nsISupports> infoObj = entry->SecurityInfo();
    nsCOMPtr<nsISerializable> serializable = do_QueryInterface(infoObj);
    if (infoObj && !serializable) return nullptr;
    if (serializable) {
        nsCString info;
        nsresult rv = NS_SerializeToString(serializable, info);
        if (NS_FAILED(rv)) return nullptr;
        rv = entry->SetMetaDataElement("security-info", info.get());
        if (NS_FAILED(rv)) return nullptr;
    }

    uint32_t keySize = entry->Key()->Length() + 1;
    uint32_t metaSize = entry->MetaDataSize();
    uint32_t size = sizeof(nsDiskCacheEntry) + keySize + metaSize;

    if (aSize) *aSize = size;

    nsresult rv = EnsureBuffer(size);
    if (NS_FAILED(rv)) return nullptr;

    nsDiskCacheEntry *diskEntry = (nsDiskCacheEntry *)mBuffer;
    diskEntry->mHeaderVersion = nsDiskCache::kCurrentVersion;
    diskEntry->mMetaLocation = binding->mRecord.MetaLocation();
    diskEntry->mFetchCount = entry->FetchCount();
    diskEntry->mLastFetched = entry->LastFetched();
    diskEntry->mLastModified = entry->LastModified();
    diskEntry->mExpirationTime = entry->ExpirationTime();
    diskEntry->mDataSize = entry->DataSize();
    diskEntry->mKeySize = keySize;
    diskEntry->mMetaDataSize = metaSize;

    memcpy(diskEntry->Key(), entry->Key()->get(), keySize);

    rv = entry->FlattenMetaData(diskEntry->MetaData(), metaSize);
    if (NS_FAILED(rv)) return nullptr;

    return diskEntry;
}


nsresult
nsDiskCacheMap::WriteDiskCacheEntry(nsDiskCacheBinding * binding)
{
    CACHE_LOG_DEBUG(("CACHE: WriteDiskCacheEntry [%x]\n",
                     binding->mRecord.HashNumber()));

    mozilla::eventtracer::AutoEventTracer writeDiskCacheEntry(
        binding->mCacheEntry,
        mozilla::eventtracer::eExec,
        mozilla::eventtracer::eDone,
        "net::cache::WriteDiskCacheEntry");

    nsresult rv = NS_OK;
    uint32_t size;
    nsDiskCacheEntry * diskEntry = CreateDiskCacheEntry(binding, &size);
    if (!diskEntry) return NS_ERROR_UNEXPECTED;

    uint32_t fileIndex = CalculateFileIndex(size);

    // Deallocate old storage if necessary
    if (binding->mRecord.MetaLocationInitialized()) {
        // we have existing storage

        if ((binding->mRecord.MetaFile() == 0) &&
            (fileIndex == 0)) {  // keeping the separate file
            // just decrement total
            DecrementTotalSize(binding->mRecord.MetaFileSize());
            NS_ASSERTION(binding->mRecord.MetaFileGeneration() == binding->mGeneration,
                         "generations out of sync");
        } else {
            rv = DeleteStorage(&binding->mRecord, nsDiskCache::kMetaData);
            NS_ENSURE_SUCCESS(rv, rv);
        }
    }

    binding->mRecord.SetEvictionRank(ULONG_MAX - SecondsFromPRTime(PR_Now()));
    // write entry data to disk cache block file
    diskEntry->Swap();

    if (fileIndex != 0) {
        while (1) {
            uint32_t blockSize = GetBlockSizeForIndex(fileIndex);
            uint32_t blocks = ((size - 1) / blockSize) + 1;

            int32_t startBlock;
            rv = mBlockFile[fileIndex - 1].WriteBlocks(diskEntry, size, blocks,
                                                       &startBlock);
            if (NS_SUCCEEDED(rv)) {
                // update binding and cache map record
                binding->mRecord.SetMetaBlocks(fileIndex, startBlock, blocks);

                rv = UpdateRecord(&binding->mRecord);
                NS_ENSURE_SUCCESS(rv, rv);

                // XXX we should probably write out bucket ourselves

                IncrementTotalSize(blocks, blockSize);
                break;
            }

            if (fileIndex == kNumBlockFiles) {
                fileIndex = 0; // write data to separate file
                break;
            }

            // try next block file
            fileIndex++;
        }
    }

    if (fileIndex == 0) {
        // Write entry data to separate file
        uint32_t metaFileSizeK = ((size + 0x03FF) >> 10); // round up to nearest 1k
        if (metaFileSizeK > kMaxDataSizeK)
            metaFileSizeK = kMaxDataSizeK;

        binding->mRecord.SetMetaFileGeneration(binding->mGeneration);
        binding->mRecord.SetMetaFileSize(metaFileSizeK);
        rv = UpdateRecord(&binding->mRecord);
        NS_ENSURE_SUCCESS(rv, rv);

        nsCOMPtr<nsIFile> localFile;
        rv = GetLocalFileForDiskCacheRecord(&binding->mRecord,
                                            nsDiskCache::kMetaData,
                                            true,
                                            getter_AddRefs(localFile));
        NS_ENSURE_SUCCESS(rv, rv);

        // open the file
        PRFileDesc * fd;
        // open the file - restricted to user, the data could be confidential
        rv = localFile->OpenNSPRFileDesc(PR_RDWR | PR_TRUNCATE | PR_CREATE_FILE, 00600, &fd);
        NS_ENSURE_SUCCESS(rv, rv);

        // write the file
        int32_t bytesWritten = PR_Write(fd, diskEntry, size);

        PRStatus err = PR_Close(fd);
        if ((bytesWritten != (int32_t)size) || (err != PR_SUCCESS)) {
            return NS_ERROR_UNEXPECTED;
        }

        IncrementTotalSize(metaFileSizeK);
    }

    return rv;
}


nsresult
nsDiskCacheMap::ReadDataCacheBlocks(nsDiskCacheBinding * binding, char * buffer, uint32_t size)
{
    CACHE_LOG_DEBUG(("CACHE: ReadDataCacheBlocks [%x size=%u]\n",
                     binding->mRecord.HashNumber(), size));

    uint32_t fileIndex = binding->mRecord.DataFile();
    int32_t readSize = size;

    nsresult rv = mBlockFile[fileIndex - 1].ReadBlocks(buffer,
                                                       binding->mRecord.DataStartBlock(),
                                                       binding->mRecord.DataBlockCount(),
                                                       &readSize);
    NS_ENSURE_SUCCESS(rv, rv);
    if (readSize < (int32_t)size) {
        rv = NS_ERROR_UNEXPECTED;
    }
    return rv;
}


nsresult
nsDiskCacheMap::WriteDataCacheBlocks(nsDiskCacheBinding * binding, char * buffer, uint32_t size)
{
    CACHE_LOG_DEBUG(("CACHE: WriteDataCacheBlocks [%x size=%u]\n",
                     binding->mRecord.HashNumber(), size));

    mozilla::eventtracer::AutoEventTracer writeDataCacheBlocks(
        binding->mCacheEntry,
        mozilla::eventtracer::eExec,
        mozilla::eventtracer::eDone,
        "net::cache::WriteDataCacheBlocks");

    nsresult rv = NS_OK;

    // determine block file & number of blocks
    uint32_t fileIndex = CalculateFileIndex(size);
    uint32_t blockCount = 0;
    int32_t startBlock = 0;

    if (size > 0) {
        // if fileIndex is 0, bad things happen below, which makes gcc 4.7
        // complain, but it's not supposed to happen. See bug 854105.
        MOZ_ASSERT(fileIndex);
        while (fileIndex) {
            uint32_t blockSize = GetBlockSizeForIndex(fileIndex);
            blockCount = ((size - 1) / blockSize) + 1;

            rv = mBlockFile[fileIndex - 1].WriteBlocks(buffer, size, blockCount,
                                                       &startBlock);
            if (NS_SUCCEEDED(rv)) {
                IncrementTotalSize(blockCount, blockSize);
                break;
            }

            if (fileIndex == kNumBlockFiles)
                return rv;

            fileIndex++;
        }
    }

    // update binding and cache map record
    binding->mRecord.SetDataBlocks(fileIndex, startBlock, blockCount);
    if (!binding->mDoomed) {
        rv = UpdateRecord(&binding->mRecord);
    }
    return rv;
}


nsresult
nsDiskCacheMap::DeleteStorage(nsDiskCacheRecord * record)
{
    nsresult rv1 = DeleteStorage(record, nsDiskCache::kData);
    nsresult rv2 = DeleteStorage(record, nsDiskCache::kMetaData);
    return NS_FAILED(rv1) ? rv1 : rv2;
}


nsresult
nsDiskCacheMap::DeleteStorage(nsDiskCacheRecord * record, bool metaData)
{
    CACHE_LOG_DEBUG(("CACHE: DeleteStorage [%x %u]\n", record->HashNumber(),
                     metaData));

    nsresult rv = NS_ERROR_UNEXPECTED;
    uint32_t fileIndex = metaData ? record->MetaFile() : record->DataFile();
    nsCOMPtr<nsIFile> file;

    if (fileIndex == 0) {
        // delete the file
        uint32_t sizeK = metaData ? record->MetaFileSize() : record->DataFileSize();
        // XXX if sizeK == USHRT_MAX, stat file for actual size

        rv = GetFileForDiskCacheRecord(record, metaData, false, getter_AddRefs(file));
        if (NS_SUCCEEDED(rv)) {
            rv = file->Remove(false);    // false == non-recursive
        }
        DecrementTotalSize(sizeK);

    } else if (fileIndex < (kNumBlockFiles + 1)) {
        // deallocate blocks
        uint32_t startBlock = metaData ? record->MetaStartBlock() : record->DataStartBlock();
        uint32_t blockCount = metaData ? record->MetaBlockCount() : record->DataBlockCount();

        rv = mBlockFile[fileIndex - 1].DeallocateBlocks(startBlock, blockCount);
        DecrementTotalSize(blockCount, GetBlockSizeForIndex(fileIndex));
    }
    if (metaData) record->ClearMetaLocation();
    else          record->ClearDataLocation();

    return rv;
}


nsresult
nsDiskCacheMap::GetFileForDiskCacheRecord(nsDiskCacheRecord * record,
                                          bool meta,
                                          bool createPath,
                                          nsIFile ** result)
{
    if (!mCacheDirectory) return NS_ERROR_NOT_AVAILABLE;

    nsCOMPtr<nsIFile> file;
    nsresult rv = mCacheDirectory->Clone(getter_AddRefs(file));
    if (NS_FAILED(rv)) return rv;

    uint32_t hash = record->HashNumber();

    // The file is stored under subdirectories according to the hash number:
    // 0x01234567 -> 0/12/
    rv = file->AppendNative(nsPrintfCString("%X", hash >> 28));
    if (NS_FAILED(rv)) return rv;
    rv = file->AppendNative(nsPrintfCString("%02X", (hash >> 20) & 0xFF));
    if (NS_FAILED(rv)) return rv;

    bool exists;
    if (createPath && (NS_FAILED(file->Exists(&exists)) || !exists)) {
        rv = file->Create(nsIFile::DIRECTORY_TYPE, 0700);
        if (NS_FAILED(rv)) return rv;
    }

    int16_t generation = record->Generation();
    char name[32];
    // Cut the beginning of the hash that was used in the path
    ::sprintf(name, "%05X%c%02X", hash & 0xFFFFF, (meta ? 'm' : 'd'),
              generation);
    rv = file->AppendNative(nsDependentCString(name));
    if (NS_FAILED(rv)) return rv;

    NS_IF_ADDREF(*result = file);
    return rv;
}


nsresult
nsDiskCacheMap::GetLocalFileForDiskCacheRecord(nsDiskCacheRecord * record,
                                               bool meta,
                                               bool createPath,
                                               nsIFile ** result)
{
    nsCOMPtr<nsIFile> file;
    nsresult rv = GetFileForDiskCacheRecord(record,
                                            meta,
                                            createPath,
                                            getter_AddRefs(file));
    if (NS_FAILED(rv)) return rv;

    NS_IF_ADDREF(*result = file);
    return rv;
}


nsresult
nsDiskCacheMap::GetBlockFileForIndex(uint32_t index, nsIFile ** result)
{
    if (!mCacheDirectory) return NS_ERROR_NOT_AVAILABLE;

    nsCOMPtr<nsIFile> file;
    nsresult rv = mCacheDirectory->Clone(getter_AddRefs(file));
    if (NS_FAILED(rv)) return rv;

    char name[32];
    ::sprintf(name, "_CACHE_%03d_", index + 1);
    rv = file->AppendNative(nsDependentCString(name));
    if (NS_FAILED(rv)) return rv;

    NS_IF_ADDREF(*result = file);

    return rv;
}


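// Maps an entry size to a block file selector (1..kNumBlockFiles), or to 0
// meaning "store in a separate file". In effect, an entry that fits in at
// most three blocks of file 1 or 2, or four blocks of file 3, goes into a
// block file; anything larger gets its own file on disk.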
uint32_t
nsDiskCacheMap::CalculateFileIndex(uint32_t size)
{
    // We prefer to use block file with larger block if the wasted space would
    // be the same. E.g. store entry with size of 3073 bytes in 1 4K-block
    // instead of in 4 1K-blocks.

    if (size <= 3 * BLOCK_SIZE_FOR_INDEX(1)) return 1;
    if (size <= 3 * BLOCK_SIZE_FOR_INDEX(2)) return 2;
    if (size <= 4 * BLOCK_SIZE_FOR_INDEX(3)) return 3;
    return 0;
}

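// Grows the shared scratch buffer (mBuffer) to at least bufSize bytes; the
// buffer is reused by ReadDiskCacheEntry() and CreateDiskCacheEntry().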
michael@0 | 1204 | nsresult |
michael@0 | 1205 | nsDiskCacheMap::EnsureBuffer(uint32_t bufSize) |
michael@0 | 1206 | { |
michael@0 | 1207 | if (mBufferSize < bufSize) { |
michael@0 | 1208 | char * buf = (char *)PR_REALLOC(mBuffer, bufSize); |
michael@0 | 1209 | if (!buf) { |
michael@0 | 1210 | mBufferSize = 0; |
michael@0 | 1211 | return NS_ERROR_OUT_OF_MEMORY; |
michael@0 | 1212 | } |
michael@0 | 1213 | mBuffer = buf; |
michael@0 | 1214 | mBufferSize = bufSize; |
michael@0 | 1215 | } |
michael@0 | 1216 | return NS_OK; |
michael@0 | 1217 | } |
michael@0 | 1218 | |
michael@0 | 1219 | void |
michael@0 | 1220 | nsDiskCacheMap::NotifyCapacityChange(uint32_t capacity) |
michael@0 | 1221 | { |
michael@0 | 1222 |     // Heuristic 1: the average cache entry size is probably around 1KB. |
michael@0 | 1223 |     // Heuristic 2: we don't want more than 32MB reserved to store the record |
michael@0 | 1224 |     // map in memory. |
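michael@0 | 1224 |     // E.g. if sizeof(nsDiskCacheRecord) is 16 bytes, the limit works out to |
michael@0 | 1224 |     // roughly 2M records. |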
michael@0 | 1225 | const int32_t RECORD_COUNT_LIMIT = 32 * 1024 * 1024 / sizeof(nsDiskCacheRecord); |
michael@0 | 1226 | int32_t maxRecordCount = std::min(int32_t(capacity), RECORD_COUNT_LIMIT); |
michael@0 | 1227 | if (mMaxRecordCount < maxRecordCount) { |
michael@0 | 1228 | // We can only grow |
michael@0 | 1229 | mMaxRecordCount = maxRecordCount; |
michael@0 | 1230 | } |
michael@0 | 1231 | } |
michael@0 | 1232 | |
michael@0 | 1233 | size_t |
michael@0 | 1234 | nsDiskCacheMap::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) |
michael@0 | 1235 | { |
michael@0 | 1236 | size_t usage = aMallocSizeOf(mRecordArray); |
michael@0 | 1237 | |
michael@0 | 1238 | usage += aMallocSizeOf(mBuffer); |
michael@0 | 1239 | usage += aMallocSizeOf(mMapFD); |
michael@0 | 1240 | usage += aMallocSizeOf(mCleanFD); |
michael@0 | 1241 | usage += aMallocSizeOf(mCacheDirectory); |
michael@0 | 1242 | usage += aMallocSizeOf(mCleanCacheTimer); |
michael@0 | 1243 | |
michael@0 | 1244 | for (int i = 0; i < kNumBlockFiles; i++) { |
michael@0 | 1245 | usage += mBlockFile[i].SizeOfExcludingThis(aMallocSizeOf); |
michael@0 | 1246 | } |
michael@0 | 1247 | |
michael@0 | 1248 | return usage; |
michael@0 | 1249 | } |
michael@0 | 1250 | |
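michael@0 | 1250 | // Opens (creating it if needed) the _CACHE_CLEAN_ marker next to the cache |
michael@0 | 1250 | // directory, reports its previous value to telemetry when requested, and |
michael@0 | 1250 | // arms the timer that will later mark the cache clean again. |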
michael@0 | 1251 | nsresult |
michael@0 | 1252 | nsDiskCacheMap::InitCacheClean(nsIFile * cacheDirectory, |
michael@0 | 1253 | nsDiskCache::CorruptCacheInfo * corruptInfo, |
michael@0 | 1254 | bool reportCacheCleanTelemetryData) |
michael@0 | 1255 | { |
michael@0 | 1256 | // The _CACHE_CLEAN_ file will be used in the future to determine |
michael@0 | 1257 | // if the cache is clean or not. |
michael@0 | 1258 | bool cacheCleanFileExists = false; |
michael@0 | 1259 | nsCOMPtr<nsIFile> cacheCleanFile; |
michael@0 | 1260 | nsresult rv = cacheDirectory->GetParent(getter_AddRefs(cacheCleanFile)); |
michael@0 | 1261 | if (NS_SUCCEEDED(rv)) { |
michael@0 | 1262 | rv = cacheCleanFile->AppendNative( |
michael@0 | 1263 | NS_LITERAL_CSTRING("_CACHE_CLEAN_")); |
michael@0 | 1264 | if (NS_SUCCEEDED(rv)) { |
michael@0 | 1265 |             // Check if the file already exists; if it does, we will later read |
michael@0 | 1266 |             // its value and report it to telemetry. |
michael@0 | 1267 | cacheCleanFile->Exists(&cacheCleanFileExists); |
michael@0 | 1268 | } |
michael@0 | 1269 | } |
michael@0 | 1270 | if (NS_FAILED(rv)) { |
michael@0 | 1271 | NS_WARNING("Could not build cache clean file path"); |
michael@0 | 1272 | *corruptInfo = nsDiskCache::kCacheCleanFilePathError; |
michael@0 | 1273 | return rv; |
michael@0 | 1274 | } |
michael@0 | 1275 | |
michael@0 | 1276 | // Make sure the _CACHE_CLEAN_ file exists |
michael@0 | 1277 | rv = cacheCleanFile->OpenNSPRFileDesc(PR_RDWR | PR_CREATE_FILE, |
michael@0 | 1278 | 00600, &mCleanFD); |
michael@0 | 1279 | if (NS_FAILED(rv)) { |
michael@0 | 1280 | NS_WARNING("Could not open cache clean file"); |
michael@0 | 1281 | *corruptInfo = nsDiskCache::kCacheCleanOpenFileError; |
michael@0 | 1282 | return rv; |
michael@0 | 1283 | } |
michael@0 | 1284 | |
michael@0 | 1285 | if (cacheCleanFileExists) { |
michael@0 | 1286 | char clean = '0'; |
michael@0 | 1287 | int32_t bytesRead = PR_Read(mCleanFD, &clean, 1); |
michael@0 | 1288 | if (bytesRead != 1) { |
michael@0 | 1289 | NS_WARNING("Could not read _CACHE_CLEAN_ file contents"); |
michael@0 | 1290 | } else if (reportCacheCleanTelemetryData) { |
michael@0 | 1291 | Telemetry::Accumulate(Telemetry::DISK_CACHE_REDUCTION_TRIAL, |
michael@0 | 1292 | clean == '1' ? 1 : 0); |
michael@0 | 1293 | } |
michael@0 | 1294 | } |
michael@0 | 1295 | |
michael@0 | 1296 |     // Create a timer that will be used to revalidate the cache |
michael@0 | 1297 |     // once it has been idle long enough |
michael@0 | 1298 | mCleanCacheTimer = do_CreateInstance("@mozilla.org/timer;1", &rv); |
michael@0 | 1299 | if (NS_SUCCEEDED(rv)) { |
michael@0 | 1300 | mCleanCacheTimer->SetTarget(nsCacheService::GlobalInstance()->mCacheIOThread); |
michael@0 | 1301 | rv = ResetCacheTimer(); |
michael@0 | 1302 | } |
michael@0 | 1303 | |
michael@0 | 1304 | if (NS_FAILED(rv)) { |
michael@0 | 1305 | NS_WARNING("Could not create cache clean timer"); |
michael@0 | 1306 | mCleanCacheTimer = nullptr; |
michael@0 | 1307 | *corruptInfo = nsDiskCache::kCacheCleanTimerError; |
michael@0 | 1308 | return rv; |
michael@0 | 1309 | } |
michael@0 | 1310 | |
michael@0 | 1311 | return NS_OK; |
michael@0 | 1312 | } |
michael@0 | 1313 | |
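michael@0 | 1313 | // Persists the clean/dirty flag as a single character at the start of the |
michael@0 | 1313 | // _CACHE_CLEAN_ file and flushes it to disk. |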
michael@0 | 1314 | nsresult |
michael@0 | 1315 | nsDiskCacheMap::WriteCacheClean(bool clean) |
michael@0 | 1316 | { |
michael@0 | 1317 | nsCacheService::AssertOwnsLock(); |
michael@0 | 1318 | if (!mCleanFD) { |
michael@0 | 1319 | NS_WARNING("Cache clean file is not open!"); |
michael@0 | 1320 | return NS_ERROR_FAILURE; |
michael@0 | 1321 | } |
michael@0 | 1322 | |
michael@0 | 1323 | CACHE_LOG_DEBUG(("CACHE: WriteCacheClean: %d\n", clean? 1 : 0)); |
michael@0 | 1324 |     // A simple '1' or '0' denotes whether the cache is clean, since the file |
michael@0 | 1325 |     // can then be edited easily in any text editor for testing. |
michael@0 | 1326 | char data = clean? '1' : '0'; |
michael@0 | 1327 | int32_t filePos = PR_Seek(mCleanFD, 0, PR_SEEK_SET); |
michael@0 | 1328 | if (filePos != 0) { |
michael@0 | 1329 | NS_WARNING("Could not seek in cache clean file!"); |
michael@0 | 1330 | return NS_ERROR_FAILURE; |
michael@0 | 1331 | } |
michael@0 | 1332 | int32_t bytesWritten = PR_Write(mCleanFD, &data, 1); |
michael@0 | 1333 | if (bytesWritten != 1) { |
michael@0 | 1334 | NS_WARNING("Could not write cache clean file!"); |
michael@0 | 1335 | return NS_ERROR_FAILURE; |
michael@0 | 1336 | } |
michael@0 | 1337 | PRStatus err = PR_Sync(mCleanFD); |
michael@0 | 1338 | if (err != PR_SUCCESS) { |
michael@0 | 1339 | NS_WARNING("Could not flush cache clean file!"); |
michael@0 | 1340 | } |
michael@0 | 1341 | |
michael@0 | 1342 | return NS_OK; |
michael@0 | 1343 | } |
michael@0 | 1344 | |
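michael@0 | 1344 | // Marks the on-disk cache as dirty ('0' in _CACHE_CLEAN_) unless the dirty |
michael@0 | 1344 | // flag has already been flushed, then re-arms the revalidation timer. |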
michael@0 | 1345 | nsresult |
michael@0 | 1346 | nsDiskCacheMap::InvalidateCache() |
michael@0 | 1347 | { |
michael@0 | 1348 | nsCacheService::AssertOwnsLock(); |
michael@0 | 1349 | CACHE_LOG_DEBUG(("CACHE: InvalidateCache\n")); |
michael@0 | 1350 | nsresult rv; |
michael@0 | 1351 | |
michael@0 | 1352 | if (!mIsDirtyCacheFlushed) { |
michael@0 | 1353 | rv = WriteCacheClean(false); |
michael@0 | 1354 | if (NS_FAILED(rv)) { |
michael@0 | 1355 | Telemetry::Accumulate(Telemetry::DISK_CACHE_INVALIDATION_SUCCESS, 0); |
michael@0 | 1356 | return rv; |
michael@0 | 1357 | } |
michael@0 | 1358 | |
michael@0 | 1359 | Telemetry::Accumulate(Telemetry::DISK_CACHE_INVALIDATION_SUCCESS, 1); |
michael@0 | 1360 | mIsDirtyCacheFlushed = true; |
michael@0 | 1361 | } |
michael@0 | 1362 | |
michael@0 | 1363 | rv = ResetCacheTimer(); |
michael@0 | 1364 | NS_ENSURE_SUCCESS(rv, rv); |
michael@0 | 1365 | |
michael@0 | 1366 | return NS_OK; |
michael@0 | 1367 | } |
michael@0 | 1368 | |
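michael@0 | 1368 | // Re-arms the one-shot revalidation timer and records the time, so the timer |
michael@0 | 1368 | // callback can tell whether InvalidateCache ran again in the meantime. |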
michael@0 | 1369 | nsresult |
michael@0 | 1370 | nsDiskCacheMap::ResetCacheTimer(int32_t timeout) |
michael@0 | 1371 | { |
michael@0 | 1372 | mCleanCacheTimer->Cancel(); |
michael@0 | 1373 | nsresult rv = |
michael@0 | 1374 | mCleanCacheTimer->InitWithFuncCallback(RevalidateTimerCallback, |
michael@0 | 1375 | nullptr, timeout, |
michael@0 | 1376 | nsITimer::TYPE_ONE_SHOT); |
michael@0 | 1377 | NS_ENSURE_SUCCESS(rv, rv); |
michael@0 | 1378 | mLastInvalidateTime = PR_IntervalNow(); |
michael@0 | 1379 | |
michael@0 | 1380 | return rv; |
michael@0 | 1381 | } |
michael@0 | 1382 | |
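michael@0 | 1382 | // Timer callback: runs on the cache I/O thread and attempts to mark the |
michael@0 | 1382 | // cache clean once it has been quiet for kRevalidateCacheTimeout. |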
michael@0 | 1383 | void |
michael@0 | 1384 | nsDiskCacheMap::RevalidateTimerCallback(nsITimer *aTimer, void *arg) |
michael@0 | 1385 | { |
michael@0 | 1386 | nsCacheServiceAutoLock lock(LOCK_TELEM(NSDISKCACHEMAP_REVALIDATION)); |
michael@0 | 1387 | if (!nsCacheService::gService->mDiskDevice || |
michael@0 | 1388 | !nsCacheService::gService->mDiskDevice->Initialized()) { |
michael@0 | 1389 | return; |
michael@0 | 1390 | } |
michael@0 | 1391 | |
michael@0 | 1392 | nsDiskCacheMap *diskCacheMap = |
michael@0 | 1393 | &nsCacheService::gService->mDiskDevice->mCacheMap; |
michael@0 | 1394 | |
michael@0 | 1395 |     // If less than kRevalidateCacheTimeout has elapsed since the last timer |
michael@0 | 1396 |     // was issued, then another thread called InvalidateCache. This won't |
michael@0 | 1397 |     // catch all cases where we wanted to cancel the timer, but under the |
michael@0 | 1398 |     // lock it is always OK to revalidate as long as IsCacheInSafeState() |
michael@0 | 1399 |     // returns true. We just want to avoid revalidating when we can, to |
michael@0 | 1400 |     // reduce I/O, and this check does that. |
michael@0 | 1401 | uint32_t delta = |
michael@0 | 1402 | PR_IntervalToMilliseconds(PR_IntervalNow() - |
michael@0 | 1403 | diskCacheMap->mLastInvalidateTime) + |
michael@0 | 1404 | kRevalidateCacheTimeoutTolerance; |
michael@0 | 1405 | if (delta < kRevalidateCacheTimeout) { |
michael@0 | 1406 | diskCacheMap->ResetCacheTimer(); |
michael@0 | 1407 | return; |
michael@0 | 1408 | } |
michael@0 | 1409 | |
michael@0 | 1410 | nsresult rv = diskCacheMap->RevalidateCache(); |
michael@0 | 1411 | if (NS_FAILED(rv)) { |
michael@0 | 1412 | diskCacheMap->ResetCacheTimer(kRevalidateCacheErrorTimeout); |
michael@0 | 1413 | } |
michael@0 | 1414 | } |
michael@0 | 1415 | |
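michael@0 | 1415 | // The cache is only safe to mark as clean while no doomed entries are still |
michael@0 | 1415 | // waiting on the doom list. |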
michael@0 | 1416 | bool |
michael@0 | 1417 | nsDiskCacheMap::IsCacheInSafeState() |
michael@0 | 1418 | { |
michael@0 | 1419 | return nsCacheService::GlobalInstance()->IsDoomListEmpty(); |
michael@0 | 1420 | } |
michael@0 | 1421 | |
michael@0 | 1422 | nsresult |
michael@0 | 1423 | nsDiskCacheMap::RevalidateCache() |
michael@0 | 1424 | { |
michael@0 | 1425 | CACHE_LOG_DEBUG(("CACHE: RevalidateCache\n")); |
michael@0 | 1426 | nsresult rv; |
michael@0 | 1427 | |
michael@0 | 1428 | if (!IsCacheInSafeState()) { |
michael@0 | 1429 | Telemetry::Accumulate(Telemetry::DISK_CACHE_REVALIDATION_SAFE, 0); |
michael@0 | 1430 |         CACHE_LOG_DEBUG(("CACHE: Revalidation should not be performed because " |
michael@0 | 1431 |                          "the cache is not in a safe state\n")); |
michael@0 | 1432 |         // Normally we would return an error here, but there is a bug where |
michael@0 | 1433 |         // the doom list sometimes gets an entry 'stuck' and doesn't clear it |
michael@0 | 1434 |         // until browser shutdown. So we allow revalidation for the time being |
michael@0 | 1435 |         // to get proper telemetry data on how much the cache corruption plan |
michael@0 | 1436 |         // would help. |
michael@0 | 1437 | } else { |
michael@0 | 1438 | Telemetry::Accumulate(Telemetry::DISK_CACHE_REVALIDATION_SAFE, 1); |
michael@0 | 1439 | } |
michael@0 | 1440 | |
michael@0 | 1441 | // We want this after the lock to prove that flushing a file isn't that expensive |
michael@0 | 1442 | Telemetry::AutoTimer<Telemetry::NETWORK_DISK_CACHE_REVALIDATION> totalTimer; |
michael@0 | 1443 | |
michael@0 | 1444 | // If telemetry data shows it is worth it, we'll be flushing headers and |
michael@0 | 1445 | // records before flushing the clean cache file. |
michael@0 | 1446 | |
michael@0 | 1447 | // Write out the _CACHE_CLEAN_ file with '1' |
michael@0 | 1448 | rv = WriteCacheClean(true); |
michael@0 | 1449 | if (NS_FAILED(rv)) { |
michael@0 | 1450 | Telemetry::Accumulate(Telemetry::DISK_CACHE_REVALIDATION_SUCCESS, 0); |
michael@0 | 1451 | return rv; |
michael@0 | 1452 | } |
michael@0 | 1453 | |
michael@0 | 1454 | Telemetry::Accumulate(Telemetry::DISK_CACHE_REVALIDATION_SUCCESS, 1); |
michael@0 | 1455 | mIsDirtyCacheFlushed = false; |
michael@0 | 1456 | |
michael@0 | 1457 | return NS_OK; |
michael@0 | 1458 | } |