/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* vim:set ts=4 sw=4 sts=4 cin et: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsCache.h"
#include "nsDiskCacheMap.h"
#include "nsDiskCacheBinding.h"
#include "nsDiskCacheEntry.h"
#include "nsDiskCacheDevice.h"
#include "nsCacheService.h"

#include <string.h>
#include "nsPrintfCString.h"

#include "nsISerializable.h"
#include "nsSerializationHelper.h"

#include "mozilla/MemoryReporting.h"
#include "mozilla/Telemetry.h"
#include "mozilla/VisualEventTracer.h"
#include <algorithm>

using namespace mozilla;

/******************************************************************************
 *  nsDiskCacheMap
 *****************************************************************************/

/**
 *  File operations
 */

nsresult
nsDiskCacheMap::Open(nsIFile * cacheDirectory,
                     nsDiskCache::CorruptCacheInfo * corruptInfo,
                     bool reportCacheCleanTelemetryData)
{
    NS_ENSURE_ARG_POINTER(corruptInfo);

    // Assume we have an unexpected error until we find otherwise.
    *corruptInfo = nsDiskCache::kUnexpectedError;
    NS_ENSURE_ARG_POINTER(cacheDirectory);
    if (mMapFD)  return NS_ERROR_ALREADY_INITIALIZED;

    mCacheDirectory = cacheDirectory;   // save a reference for ourselves

    // create nsIFile for _CACHE_MAP_
    nsresult rv;
    nsCOMPtr<nsIFile> file;
    rv = cacheDirectory->Clone(getter_AddRefs(file));
    rv = file->AppendNative(NS_LITERAL_CSTRING("_CACHE_MAP_"));
    NS_ENSURE_SUCCESS(rv, rv);

    // open the file - restricted to user, the data could be confidential
    rv = file->OpenNSPRFileDesc(PR_RDWR | PR_CREATE_FILE, 00600, &mMapFD);
    if (NS_FAILED(rv)) {
        *corruptInfo = nsDiskCache::kOpenCacheMapError;
        NS_WARNING("Could not open cache map file");
        return NS_ERROR_FILE_CORRUPTED;
    }

    bool cacheFilesExist = CacheFilesExist();
    rv = NS_ERROR_FILE_CORRUPTED;  // presume the worst
    uint32_t mapSize = PR_Available(mMapFD);

    if (NS_FAILED(InitCacheClean(cacheDirectory,
                                 corruptInfo,
                                 reportCacheCleanTelemetryData))) {
        // corruptInfo is set in the call to InitCacheClean
        goto error_exit;
    }

    // check size of map file
    if (mapSize == 0) {  // creating a new _CACHE_MAP_

        // block files shouldn't exist if we're creating the _CACHE_MAP_
        if (cacheFilesExist) {
            *corruptInfo = nsDiskCache::kBlockFilesShouldNotExist;
            goto error_exit;
        }

        if (NS_FAILED(CreateCacheSubDirectories())) {
            *corruptInfo = nsDiskCache::kCreateCacheSubdirectories;
            goto error_exit;
        }

        // create the file - initialize in memory
        memset(&mHeader, 0, sizeof(nsDiskCacheHeader));
        mHeader.mVersion = nsDiskCache::kCurrentVersion;
        mHeader.mRecordCount = kMinRecordCount;
        mRecordArray = (nsDiskCacheRecord *)
            PR_CALLOC(mHeader.mRecordCount * sizeof(nsDiskCacheRecord));
        if (!mRecordArray) {
            *corruptInfo = nsDiskCache::kOutOfMemory;
            rv = NS_ERROR_OUT_OF_MEMORY;
            goto error_exit;
        }
    } else if (mapSize >= sizeof(nsDiskCacheHeader)) {  // read existing _CACHE_MAP_

        // if _CACHE_MAP_ exists, so should the block files
        if (!cacheFilesExist) {
            *corruptInfo = nsDiskCache::kBlockFilesShouldExist;
            goto error_exit;
        }

        CACHE_LOG_DEBUG(("CACHE: nsDiskCacheMap::Open [this=%p] reading map", this));

        // read the header
        uint32_t bytesRead = PR_Read(mMapFD, &mHeader, sizeof(nsDiskCacheHeader));
        if (sizeof(nsDiskCacheHeader) != bytesRead) {
            *corruptInfo = nsDiskCache::kHeaderSizeNotRead;
            goto error_exit;
        }
        mHeader.Unswap();

        if (mHeader.mIsDirty) {
            *corruptInfo = nsDiskCache::kHeaderIsDirty;
            goto error_exit;
        }

        if (mHeader.mVersion != nsDiskCache::kCurrentVersion) {
            *corruptInfo = nsDiskCache::kVersionMismatch;
            goto error_exit;
        }

        uint32_t recordArraySize =
            mHeader.mRecordCount * sizeof(nsDiskCacheRecord);
        if (mapSize < recordArraySize + sizeof(nsDiskCacheHeader)) {
            *corruptInfo = nsDiskCache::kRecordsIncomplete;
            goto error_exit;
        }

        // Get the space for the records
        mRecordArray = (nsDiskCacheRecord *) PR_MALLOC(recordArraySize);
        if (!mRecordArray) {
            *corruptInfo = nsDiskCache::kOutOfMemory;
            rv = NS_ERROR_OUT_OF_MEMORY;
            goto error_exit;
        }

        // Read the records
        bytesRead = PR_Read(mMapFD, mRecordArray, recordArraySize);
        if (bytesRead < recordArraySize) {
            *corruptInfo = nsDiskCache::kNotEnoughToRead;
            goto error_exit;
        }

        // Unswap each record
        int32_t total = 0;
        for (int32_t i = 0; i < mHeader.mRecordCount; ++i) {
            if (mRecordArray[i].HashNumber()) {
#if defined(IS_LITTLE_ENDIAN)
                mRecordArray[i].Unswap();
#endif
                total++;
            }
        }

        // verify entry count
        if (total != mHeader.mEntryCount) {
            *corruptInfo = nsDiskCache::kEntryCountIncorrect;
            goto error_exit;
        }

    } else {
        *corruptInfo = nsDiskCache::kHeaderIncomplete;
        goto error_exit;
    }

    rv = OpenBlockFiles(corruptInfo);
    if (NS_FAILED(rv)) {
        // corruptInfo is set in the call to OpenBlockFiles
        goto error_exit;
    }

    // set dirty bit and flush header
    mHeader.mIsDirty = true;
    rv = FlushHeader();
    if (NS_FAILED(rv)) {
        *corruptInfo = nsDiskCache::kFlushHeaderError;
        goto error_exit;
    }

    Telemetry::Accumulate(Telemetry::HTTP_DISK_CACHE_OVERHEAD,
                          (uint32_t)SizeOfExcludingThis(moz_malloc_size_of));

    *corruptInfo = nsDiskCache::kNotCorrupt;
    return NS_OK;

error_exit:
    (void) Close(false);

    return rv;
}


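/**
 *  Close
 *
 *  Closes the block files and the cache map file, optionally flushing the
 *  records and clearing the dirty bit first, then releases the record array
 *  and the scratch buffer. Any pending cache revalidation timer is canceled.
 */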
nsresult
nsDiskCacheMap::Close(bool flush)
{
    nsCacheService::AssertOwnsLock();
    nsresult  rv = NS_OK;

    // Cancel any pending cache validation event, the FlushRecords call below
    // will validate the cache.
    if (mCleanCacheTimer) {
        mCleanCacheTimer->Cancel();
    }

    // If cache map file and its block files are still open, close them
    if (mMapFD) {
        // close block files
        rv = CloseBlockFiles(flush);
        if (NS_SUCCEEDED(rv) && flush && mRecordArray) {
            // write the map records
            rv = FlushRecords(false);   // don't bother swapping buckets back
            if (NS_SUCCEEDED(rv)) {
                // clear dirty bit
                mHeader.mIsDirty = false;
                rv = FlushHeader();
            }
        }
        if ((PR_Close(mMapFD) != PR_SUCCESS) && (NS_SUCCEEDED(rv)))
            rv = NS_ERROR_UNEXPECTED;

        mMapFD = nullptr;
    }

    if (mCleanFD) {
        PR_Close(mCleanFD);
        mCleanFD = nullptr;
    }

    PR_FREEIF(mRecordArray);
    PR_FREEIF(mBuffer);
    mBufferSize = 0;
    return rv;
}


nsresult
nsDiskCacheMap::Trim()
{
    nsresult rv, rv2 = NS_OK;
    for (int i = 0; i < kNumBlockFiles; ++i) {
        rv = mBlockFile[i].Trim();
        if (NS_FAILED(rv))  rv2 = rv;   // if one or more errors, report at least one
    }
    // Try to shrink the records array
    rv = ShrinkRecords();
    if (NS_FAILED(rv))  rv2 = rv;   // if one or more errors, report at least one
    return rv2;
}


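/**
 *  FlushHeader
 *
 *  Writes the byte-swapped header to the start of _CACHE_MAP_ and syncs it
 *  to disk. When the header is clean, the cache clean file is revalidated.
 */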
nsresult
nsDiskCacheMap::FlushHeader()
{
    if (!mMapFD)  return NS_ERROR_NOT_AVAILABLE;

    // seek to beginning of cache map
    int32_t filePos = PR_Seek(mMapFD, 0, PR_SEEK_SET);
    if (filePos != 0)  return NS_ERROR_UNEXPECTED;

    // write the header
    mHeader.Swap();
    int32_t bytesWritten = PR_Write(mMapFD, &mHeader, sizeof(nsDiskCacheHeader));
    mHeader.Unswap();
    if (sizeof(nsDiskCacheHeader) != bytesWritten) {
        return NS_ERROR_UNEXPECTED;
    }

    PRStatus err = PR_Sync(mMapFD);
    if (err != PR_SUCCESS)  return NS_ERROR_UNEXPECTED;

    // If we have a clean header then revalidate the cache clean file
    if (!mHeader.mIsDirty) {
        RevalidateCache();
    }

    return NS_OK;
}


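/**
 *  FlushRecords
 *
 *  Writes the record array to _CACHE_MAP_ just past the header. On
 *  little-endian platforms the records are byte-swapped for the write and,
 *  when unswap is true, swapped back afterwards.
 */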
nsresult
nsDiskCacheMap::FlushRecords(bool unswap)
{
    if (!mMapFD)  return NS_ERROR_NOT_AVAILABLE;

    // seek to beginning of buckets
    int32_t filePos = PR_Seek(mMapFD, sizeof(nsDiskCacheHeader), PR_SEEK_SET);
    if (filePos != sizeof(nsDiskCacheHeader))
        return NS_ERROR_UNEXPECTED;

#if defined(IS_LITTLE_ENDIAN)
    // Swap each record
    for (int32_t i = 0; i < mHeader.mRecordCount; ++i) {
        if (mRecordArray[i].HashNumber())
            mRecordArray[i].Swap();
    }
#endif

    int32_t recordArraySize = sizeof(nsDiskCacheRecord) * mHeader.mRecordCount;

    int32_t bytesWritten = PR_Write(mMapFD, mRecordArray, recordArraySize);
    if (bytesWritten != recordArraySize)
        return NS_ERROR_UNEXPECTED;

#if defined(IS_LITTLE_ENDIAN)
    if (unswap) {
        // Unswap each record
        for (int32_t i = 0; i < mHeader.mRecordCount; ++i) {
            if (mRecordArray[i].HashNumber())
                mRecordArray[i].Unswap();
        }
    }
#endif

    return NS_OK;
}


/**
 *  Record operations
 */

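/**
 *  GetBucketRank
 *
 *  Returns the highest eviction rank in the bucket or, when targetRank is
 *  non-zero, the highest rank strictly below targetRank.
 */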
uint32_t
nsDiskCacheMap::GetBucketRank(uint32_t bucketIndex, uint32_t targetRank)
{
    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);
    uint32_t            rank    = 0;

    for (int i = mHeader.mBucketUsage[bucketIndex]-1; i >= 0; i--) {
        if ((rank < records[i].EvictionRank()) &&
            ((targetRank == 0) || (records[i].EvictionRank() < targetRank)))
            rank = records[i].EvictionRank();
    }
    return rank;
}

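/**
 *  GrowRecords
 *
 *  Doubles the record array (up to mMaxRecordCount) and spreads the
 *  existing buckets out over the larger array.
 */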
nsresult
nsDiskCacheMap::GrowRecords()
{
    if (mHeader.mRecordCount >= mMaxRecordCount)
        return NS_OK;
    CACHE_LOG_DEBUG(("CACHE: GrowRecords\n"));

    // Resize the record array
    int32_t newCount = mHeader.mRecordCount << 1;
    if (newCount > mMaxRecordCount)
        newCount = mMaxRecordCount;
    nsDiskCacheRecord *newArray = (nsDiskCacheRecord *)
        PR_REALLOC(mRecordArray, newCount * sizeof(nsDiskCacheRecord));
    if (!newArray)
        return NS_ERROR_OUT_OF_MEMORY;

    // Space out the buckets
    uint32_t oldRecordsPerBucket = GetRecordsPerBucket();
    uint32_t newRecordsPerBucket = newCount / kBuckets;
    // Work from back to space out each bucket to the new array
    for (int bucketIndex = kBuckets - 1; bucketIndex >= 0; --bucketIndex) {
        // Move bucket
        nsDiskCacheRecord *newRecords = newArray + bucketIndex * newRecordsPerBucket;
        const uint32_t count = mHeader.mBucketUsage[bucketIndex];
        memmove(newRecords,
                newArray + bucketIndex * oldRecordsPerBucket,
                count * sizeof(nsDiskCacheRecord));
        // clear unused records
        memset(newRecords + count, 0,
               (newRecordsPerBucket - count) * sizeof(nsDiskCacheRecord));
    }

    // Set as the new record array
    mRecordArray = newArray;
    mHeader.mRecordCount = newCount;

    InvalidateCache();

    return NS_OK;
}

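/**
 *  ShrinkRecords
 *
 *  Halves the per-bucket capacity while every bucket is less than half
 *  full, packs the buckets together, and reallocates the smaller array.
 */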
nsresult
nsDiskCacheMap::ShrinkRecords()
{
    if (mHeader.mRecordCount <= kMinRecordCount)
        return NS_OK;
    CACHE_LOG_DEBUG(("CACHE: ShrinkRecords\n"));

    // Verify if we can shrink the record array: all buckets must be less than
    // 1/2 filled
    uint32_t maxUsage = 0, bucketIndex;
    for (bucketIndex = 0; bucketIndex < kBuckets; ++bucketIndex) {
        if (maxUsage < mHeader.mBucketUsage[bucketIndex])
            maxUsage = mHeader.mBucketUsage[bucketIndex];
    }
    // Determine new bucket size, halve size until maxUsage
    uint32_t oldRecordsPerBucket = GetRecordsPerBucket();
    uint32_t newRecordsPerBucket = oldRecordsPerBucket;
    while (maxUsage < (newRecordsPerBucket >> 1))
        newRecordsPerBucket >>= 1;
    if (newRecordsPerBucket < (kMinRecordCount / kBuckets))
        newRecordsPerBucket = (kMinRecordCount / kBuckets);
    NS_ASSERTION(newRecordsPerBucket <= oldRecordsPerBucket,
                 "ShrinkRecords() can't grow records!");
    if (newRecordsPerBucket == oldRecordsPerBucket)
        return NS_OK;
    // Move the buckets close to each other
    for (bucketIndex = 1; bucketIndex < kBuckets; ++bucketIndex) {
        // Move bucket
        memmove(mRecordArray + bucketIndex * newRecordsPerBucket,
                mRecordArray + bucketIndex * oldRecordsPerBucket,
                newRecordsPerBucket * sizeof(nsDiskCacheRecord));
    }

    // Shrink the record array memory block itself
    uint32_t newCount = newRecordsPerBucket * kBuckets;
    nsDiskCacheRecord* newArray = (nsDiskCacheRecord *)
        PR_REALLOC(mRecordArray, newCount * sizeof(nsDiskCacheRecord));
    if (!newArray)
        return NS_ERROR_OUT_OF_MEMORY;

    // Set as the new record array
    mRecordArray = newArray;
    mHeader.mRecordCount = newCount;

    InvalidateCache();

    return NS_OK;
}

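/**
 *  AddRecord
 *
 *  Adds mapRecord to its bucket, growing the record array if the bucket is
 *  full. If the array cannot grow, the record with the highest eviction
 *  rank is evicted and returned through oldRecord (a hash number of 0 in
 *  oldRecord means nothing was evicted).
 */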
nsresult
nsDiskCacheMap::AddRecord( nsDiskCacheRecord * mapRecord,
                           nsDiskCacheRecord * oldRecord)
{
    CACHE_LOG_DEBUG(("CACHE: AddRecord [%x]\n", mapRecord->HashNumber()));

    const uint32_t hashNumber  = mapRecord->HashNumber();
    const uint32_t bucketIndex = GetBucketIndex(hashNumber);
    const uint32_t count       = mHeader.mBucketUsage[bucketIndex];

    oldRecord->SetHashNumber(0);  // signify no record

    if (count == GetRecordsPerBucket()) {
        // Ignore failure to grow the record space, we will then reuse old records
        GrowRecords();
    }

    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);
    if (count < GetRecordsPerBucket()) {
        // stick the new record at the end
        records[count] = *mapRecord;
        mHeader.mEntryCount++;
        mHeader.mBucketUsage[bucketIndex]++;
        if (mHeader.mEvictionRank[bucketIndex] < mapRecord->EvictionRank())
            mHeader.mEvictionRank[bucketIndex] = mapRecord->EvictionRank();
        InvalidateCache();
    } else {
        // Find the record with the highest eviction rank
        nsDiskCacheRecord * mostEvictable = &records[0];
        for (int i = count-1; i > 0; i--) {
            if (records[i].EvictionRank() > mostEvictable->EvictionRank())
                mostEvictable = &records[i];
        }
        *oldRecord     = *mostEvictable;    // i == GetRecordsPerBucket(), so
                                            // evict the mostEvictable
        *mostEvictable = *mapRecord;        // replace it with the new record
        // check if we need to update mostEvictable entry in header
        if (mHeader.mEvictionRank[bucketIndex] < mapRecord->EvictionRank())
            mHeader.mEvictionRank[bucketIndex] = mapRecord->EvictionRank();
        if (oldRecord->EvictionRank() >= mHeader.mEvictionRank[bucketIndex])
            mHeader.mEvictionRank[bucketIndex] = GetBucketRank(bucketIndex, 0);
        InvalidateCache();
    }

    NS_ASSERTION(mHeader.mEvictionRank[bucketIndex] == GetBucketRank(bucketIndex, 0),
                 "eviction rank out of sync");
    return NS_OK;
}


nsresult
nsDiskCacheMap::UpdateRecord( nsDiskCacheRecord * mapRecord)
{
    CACHE_LOG_DEBUG(("CACHE: UpdateRecord [%x]\n", mapRecord->HashNumber()));

    const uint32_t hashNumber  = mapRecord->HashNumber();
    const uint32_t bucketIndex = GetBucketIndex(hashNumber);
    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);

    for (int i = mHeader.mBucketUsage[bucketIndex]-1; i >= 0; i--) {
        if (records[i].HashNumber() == hashNumber) {
            const uint32_t oldRank = records[i].EvictionRank();

            // stick the new record here
            records[i] = *mapRecord;

            // update eviction rank in header if necessary
            if (mHeader.mEvictionRank[bucketIndex] < mapRecord->EvictionRank())
                mHeader.mEvictionRank[bucketIndex] = mapRecord->EvictionRank();
            else if (mHeader.mEvictionRank[bucketIndex] == oldRank)
                mHeader.mEvictionRank[bucketIndex] = GetBucketRank(bucketIndex, 0);

            InvalidateCache();

            NS_ASSERTION(mHeader.mEvictionRank[bucketIndex] == GetBucketRank(bucketIndex, 0),
                         "eviction rank out of sync");
            return NS_OK;
        }
    }
    NS_NOTREACHED("record not found");
    return NS_ERROR_UNEXPECTED;
}


nsresult
nsDiskCacheMap::FindRecord( uint32_t hashNumber, nsDiskCacheRecord * result)
{
    const uint32_t bucketIndex = GetBucketIndex(hashNumber);
    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);

    for (int i = mHeader.mBucketUsage[bucketIndex]-1; i >= 0; i--) {
        if (records[i].HashNumber() == hashNumber) {
            *result = records[i];    // copy the record
            NS_ASSERTION(result->ValidRecord(), "bad cache map record");
            return NS_OK;
        }
    }
    return NS_ERROR_CACHE_KEY_NOT_FOUND;
}


nsresult
nsDiskCacheMap::DeleteRecord( nsDiskCacheRecord * mapRecord)
{
    CACHE_LOG_DEBUG(("CACHE: DeleteRecord [%x]\n", mapRecord->HashNumber()));

    const uint32_t hashNumber  = mapRecord->HashNumber();
    const uint32_t bucketIndex = GetBucketIndex(hashNumber);
    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);
    uint32_t last = mHeader.mBucketUsage[bucketIndex]-1;

    for (int i = last; i >= 0; i--) {
        if (records[i].HashNumber() == hashNumber) {
            // found it, now delete it.
            uint32_t evictionRank = records[i].EvictionRank();
            NS_ASSERTION(evictionRank == mapRecord->EvictionRank(),
                         "evictionRank out of sync");
            // if not the last record, shift last record into opening
            records[i] = records[last];
            records[last].SetHashNumber(0); // clear last record
            mHeader.mBucketUsage[bucketIndex] = last;
            mHeader.mEntryCount--;

            // update eviction rank
            uint32_t bucketIndex = GetBucketIndex(mapRecord->HashNumber());
            if (mHeader.mEvictionRank[bucketIndex] <= evictionRank) {
                mHeader.mEvictionRank[bucketIndex] = GetBucketRank(bucketIndex, 0);
            }

            InvalidateCache();

            NS_ASSERTION(mHeader.mEvictionRank[bucketIndex] ==
                         GetBucketRank(bucketIndex, 0), "eviction rank out of sync");
            return NS_OK;
        }
    }
    return NS_ERROR_UNEXPECTED;
}


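/**
 *  VisitEachRecord
 *
 *  Calls the visitor for every record in the bucket whose eviction rank is
 *  at least evictionRank, deleting records when the visitor asks for it,
 *  and returns the last visitor result so callers can stop early.
 */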
int32_t
nsDiskCacheMap::VisitEachRecord(uint32_t                    bucketIndex,
                                nsDiskCacheRecordVisitor *  visitor,
                                uint32_t                    evictionRank)
{
    int32_t             rv      = kVisitNextRecord;
    uint32_t            count   = mHeader.mBucketUsage[bucketIndex];
    nsDiskCacheRecord * records = GetFirstRecordInBucket(bucketIndex);

    // call visitor for each entry (matching any eviction rank)
    for (int i = count-1; i >= 0; i--) {
        if (evictionRank > records[i].EvictionRank())  continue;

        rv = visitor->VisitRecord(&records[i]);
        if (rv == kStopVisitingRecords)
            break;    // Stop visiting records

        if (rv == kDeleteRecordAndContinue) {
            --count;
            records[i] = records[count];
            records[count].SetHashNumber(0);
            InvalidateCache();
        }
    }

    if (mHeader.mBucketUsage[bucketIndex] - count != 0) {
        mHeader.mEntryCount -= mHeader.mBucketUsage[bucketIndex] - count;
        mHeader.mBucketUsage[bucketIndex] = count;
        // recalc eviction rank
        mHeader.mEvictionRank[bucketIndex] = GetBucketRank(bucketIndex, 0);
    }
    NS_ASSERTION(mHeader.mEvictionRank[bucketIndex] ==
                 GetBucketRank(bucketIndex, 0), "eviction rank out of sync");

    return rv;
}


/**
 *  VisitRecords
 *
 *  Visit every record in cache map in the most convenient order
 */
nsresult
nsDiskCacheMap::VisitRecords( nsDiskCacheRecordVisitor * visitor)
{
    for (int bucketIndex = 0; bucketIndex < kBuckets; ++bucketIndex) {
        if (VisitEachRecord(bucketIndex, visitor, 0) == kStopVisitingRecords)
            break;
    }
    return NS_OK;
}


/**
 *  EvictRecords
 *
 *  Just like VisitRecords, but visits the records in order of their eviction rank
 */
nsresult
nsDiskCacheMap::EvictRecords( nsDiskCacheRecordVisitor * visitor)
{
    uint32_t    tempRank[kBuckets];
    int         bucketIndex = 0;

    // copy eviction rank array
    for (bucketIndex = 0; bucketIndex < kBuckets; ++bucketIndex)
        tempRank[bucketIndex] = mHeader.mEvictionRank[bucketIndex];

    // Maximum number of iterations determined by number of records
    // as a safety limiter for the loop. Use a copy of mHeader.mEntryCount since
    // the value could decrease if some entry is evicted.
    int32_t entryCount = mHeader.mEntryCount;
    for (int n = 0; n < entryCount; ++n) {

        // find bucket with highest eviction rank
        uint32_t rank = 0;
        for (int i = 0; i < kBuckets; ++i) {
            if (rank < tempRank[i]) {
                rank = tempRank[i];
                bucketIndex = i;
            }
        }

        if (rank == 0) break;  // we've examined all the records

        // visit records in bucket with eviction ranks >= target eviction rank
        if (VisitEachRecord(bucketIndex, visitor, rank) == kStopVisitingRecords)
            break;

        // find greatest rank less than 'rank'
        tempRank[bucketIndex] = GetBucketRank(bucketIndex, rank);
    }
    return NS_OK;
}


nsresult
nsDiskCacheMap::OpenBlockFiles(nsDiskCache::CorruptCacheInfo * corruptInfo)
{
    NS_ENSURE_ARG_POINTER(corruptInfo);

    // create nsIFile for block file
    nsCOMPtr<nsIFile> blockFile;
    nsresult rv = NS_OK;
    *corruptInfo = nsDiskCache::kUnexpectedError;

    for (int i = 0; i < kNumBlockFiles; ++i) {
        rv = GetBlockFileForIndex(i, getter_AddRefs(blockFile));
        if (NS_FAILED(rv)) {
            *corruptInfo = nsDiskCache::kCouldNotGetBlockFileForIndex;
            break;
        }

        uint32_t blockSize = GetBlockSizeForIndex(i+1); // +1 to match file selectors 1,2,3
        uint32_t bitMapSize = GetBitMapSizeForIndex(i+1);
        rv = mBlockFile[i].Open(blockFile, blockSize, bitMapSize, corruptInfo);
        if (NS_FAILED(rv)) {
            // corruptInfo was set inside the call to mBlockFile[i].Open
            break;
        }
    }
    // close all files in case of any error
    if (NS_FAILED(rv))
        (void)CloseBlockFiles(false); // we already have an error to report

    return rv;
}


nsresult
nsDiskCacheMap::CloseBlockFiles(bool flush)
{
    nsresult rv, rv2 = NS_OK;
    for (int i = 0; i < kNumBlockFiles; ++i) {
        rv = mBlockFile[i].Close(flush);
        if (NS_FAILED(rv))  rv2 = rv;   // if one or more errors, report at least one
    }
    return rv2;
}


bool
nsDiskCacheMap::CacheFilesExist()
{
    nsCOMPtr<nsIFile> blockFile;
    nsresult rv;

    for (int i = 0; i < kNumBlockFiles; ++i) {
        bool exists;
        rv = GetBlockFileForIndex(i, getter_AddRefs(blockFile));
        if (NS_FAILED(rv))  return false;

        rv = blockFile->Exists(&exists);
        if (NS_FAILED(rv) || !exists)  return false;
    }

    return true;
}


nsresult
nsDiskCacheMap::CreateCacheSubDirectories()
{
    if (!mCacheDirectory)
        return NS_ERROR_UNEXPECTED;

    for (int32_t index = 0; index < 16; index++) {
        nsCOMPtr<nsIFile> file;
        nsresult rv = mCacheDirectory->Clone(getter_AddRefs(file));
        if (NS_FAILED(rv))
            return rv;

        rv = file->AppendNative(nsPrintfCString("%X", index));
        if (NS_FAILED(rv))
            return rv;

        rv = file->Create(nsIFile::DIRECTORY_TYPE, 0700);
        if (NS_FAILED(rv))
            return rv;
    }

    return NS_OK;
}


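/**
 *  ReadDiskCacheEntry
 *
 *  Reads the metadata for a record into the shared buffer, either from its
 *  separate metadata file or from the appropriate block file, and returns a
 *  pointer to the unswapped nsDiskCacheEntry (or nullptr on failure). The
 *  pointer aliases mBuffer, so it is only valid until the buffer is reused.
 */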
nsDiskCacheEntry *
nsDiskCacheMap::ReadDiskCacheEntry(nsDiskCacheRecord * record)
{
    CACHE_LOG_DEBUG(("CACHE: ReadDiskCacheEntry [%x]\n", record->HashNumber()));

    nsresult            rv        = NS_ERROR_UNEXPECTED;
    nsDiskCacheEntry *  diskEntry = nullptr;
    uint32_t            metaFile  = record->MetaFile();
    int32_t             bytesRead = 0;

    if (!record->MetaLocationInitialized())  return nullptr;

    if (metaFile == 0) {  // entry/metadata stored in separate file
        // open and read the file
        nsCOMPtr<nsIFile> file;
        rv = GetLocalFileForDiskCacheRecord(record,
                                            nsDiskCache::kMetaData,
                                            false,
                                            getter_AddRefs(file));
        NS_ENSURE_SUCCESS(rv, nullptr);

        CACHE_LOG_DEBUG(("CACHE: nsDiskCacheMap::ReadDiskCacheEntry "
                         "[this=%p] reading disk cache entry", this));

        PRFileDesc * fd = nullptr;

        // open the file - restricted to user, the data could be confidential
        rv = file->OpenNSPRFileDesc(PR_RDONLY, 00600, &fd);
        NS_ENSURE_SUCCESS(rv, nullptr);

        int32_t fileSize = PR_Available(fd);
        if (fileSize < 0) {
            // an error occurred. We could call PR_GetError(), but how would that help?
            rv = NS_ERROR_UNEXPECTED;
        } else {
            rv = EnsureBuffer(fileSize);
            if (NS_SUCCEEDED(rv)) {
                bytesRead = PR_Read(fd, mBuffer, fileSize);
                if (bytesRead < fileSize) {
                    rv = NS_ERROR_UNEXPECTED;
                }
            }
        }
        PR_Close(fd);
        NS_ENSURE_SUCCESS(rv, nullptr);

    } else if (metaFile < (kNumBlockFiles + 1)) {
        // entry/metadata stored in cache block file

        // allocate buffer
        uint32_t blockCount = record->MetaBlockCount();
        bytesRead = blockCount * GetBlockSizeForIndex(metaFile);

        rv = EnsureBuffer(bytesRead);
        NS_ENSURE_SUCCESS(rv, nullptr);

        // read diskEntry, note when the blocks are at the end of file,
        // bytesRead may be less than blockSize*blockCount.
        // But the bytesRead should at least agree with the real disk entry size.
        rv = mBlockFile[metaFile - 1].ReadBlocks(mBuffer,
                                                 record->MetaStartBlock(),
                                                 blockCount,
                                                 &bytesRead);
        NS_ENSURE_SUCCESS(rv, nullptr);
    }
    diskEntry = (nsDiskCacheEntry *)mBuffer;
    diskEntry->Unswap();    // disk to memory
    // Check if calculated size agrees with bytesRead
    if (bytesRead < 0 || (uint32_t)bytesRead < diskEntry->Size())
        return nullptr;

    // Return the buffer containing the diskEntry structure
    return diskEntry;
}


/**
 *  CreateDiskCacheEntry(nsCacheEntry * entry)
 *
 *  Prepare an nsCacheEntry for writing to disk
 */
nsDiskCacheEntry *
nsDiskCacheMap::CreateDiskCacheEntry(nsDiskCacheBinding * binding,
                                     uint32_t * aSize)
{
    nsCacheEntry * entry = binding->mCacheEntry;
    if (!entry)  return nullptr;

    // Store security info, if it is serializable
    nsCOMPtr<nsISupports> infoObj = entry->SecurityInfo();
    nsCOMPtr<nsISerializable> serializable = do_QueryInterface(infoObj);
    if (infoObj && !serializable)  return nullptr;
    if (serializable) {
        nsCString info;
        nsresult rv = NS_SerializeToString(serializable, info);
        if (NS_FAILED(rv))  return nullptr;
        rv = entry->SetMetaDataElement("security-info", info.get());
        if (NS_FAILED(rv))  return nullptr;
    }

    uint32_t  keySize  = entry->Key()->Length() + 1;
    uint32_t  metaSize = entry->MetaDataSize();
    uint32_t  size     = sizeof(nsDiskCacheEntry) + keySize + metaSize;

    if (aSize) *aSize = size;

    nsresult rv = EnsureBuffer(size);
    if (NS_FAILED(rv)) return nullptr;

    nsDiskCacheEntry *diskEntry = (nsDiskCacheEntry *)mBuffer;
    diskEntry->mHeaderVersion  = nsDiskCache::kCurrentVersion;
    diskEntry->mMetaLocation   = binding->mRecord.MetaLocation();
    diskEntry->mFetchCount     = entry->FetchCount();
    diskEntry->mLastFetched    = entry->LastFetched();
    diskEntry->mLastModified   = entry->LastModified();
    diskEntry->mExpirationTime = entry->ExpirationTime();
    diskEntry->mDataSize       = entry->DataSize();
    diskEntry->mKeySize        = keySize;
    diskEntry->mMetaDataSize   = metaSize;

    memcpy(diskEntry->Key(), entry->Key()->get(), keySize);

    rv = entry->FlattenMetaData(diskEntry->MetaData(), metaSize);
    if (NS_FAILED(rv)) return nullptr;

    return diskEntry;
}


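/**
 *  WriteDiskCacheEntry
 *
 *  Flattens the binding's cache entry and writes it either into a block
 *  file (trying successively larger block sizes) or, if it is too large or
 *  the block files are full, into a separate metadata file.
 */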
nsresult
nsDiskCacheMap::WriteDiskCacheEntry(nsDiskCacheBinding * binding)
{
    CACHE_LOG_DEBUG(("CACHE: WriteDiskCacheEntry [%x]\n",
        binding->mRecord.HashNumber()));

    mozilla::eventtracer::AutoEventTracer writeDiskCacheEntry(
        binding->mCacheEntry,
        mozilla::eventtracer::eExec,
        mozilla::eventtracer::eDone,
        "net::cache::WriteDiskCacheEntry");

    nsresult            rv        = NS_OK;
    uint32_t            size;
    nsDiskCacheEntry *  diskEntry = CreateDiskCacheEntry(binding, &size);
    if (!diskEntry)  return NS_ERROR_UNEXPECTED;

    uint32_t fileIndex = CalculateFileIndex(size);

    // Deallocate old storage if necessary
    if (binding->mRecord.MetaLocationInitialized()) {
        // we have existing storage

        if ((binding->mRecord.MetaFile() == 0) &&
            (fileIndex == 0)) {  // keeping the separate file
            // just decrement total
            DecrementTotalSize(binding->mRecord.MetaFileSize());
            NS_ASSERTION(binding->mRecord.MetaFileGeneration() == binding->mGeneration,
                         "generations out of sync");
        } else {
            rv = DeleteStorage(&binding->mRecord, nsDiskCache::kMetaData);
            NS_ENSURE_SUCCESS(rv, rv);
        }
    }

    binding->mRecord.SetEvictionRank(ULONG_MAX - SecondsFromPRTime(PR_Now()));
    // write entry data to disk cache block file
    diskEntry->Swap();

    if (fileIndex != 0) {
        while (1) {
            uint32_t blockSize = GetBlockSizeForIndex(fileIndex);
            uint32_t blocks    = ((size - 1) / blockSize) + 1;

            int32_t startBlock;
            rv = mBlockFile[fileIndex - 1].WriteBlocks(diskEntry, size, blocks,
                                                       &startBlock);
            if (NS_SUCCEEDED(rv)) {
                // update binding and cache map record
                binding->mRecord.SetMetaBlocks(fileIndex, startBlock, blocks);

                rv = UpdateRecord(&binding->mRecord);
                NS_ENSURE_SUCCESS(rv, rv);

                // XXX we should probably write out bucket ourselves

                IncrementTotalSize(blocks, blockSize);
                break;
            }

            if (fileIndex == kNumBlockFiles) {
                fileIndex = 0; // write data to separate file
                break;
            }

            // try next block file
            fileIndex++;
        }
    }

    if (fileIndex == 0) {
        // Write entry data to separate file
        uint32_t metaFileSizeK = ((size + 0x03FF) >> 10); // round up to nearest 1k
        if (metaFileSizeK > kMaxDataSizeK)
            metaFileSizeK = kMaxDataSizeK;

        binding->mRecord.SetMetaFileGeneration(binding->mGeneration);
        binding->mRecord.SetMetaFileSize(metaFileSizeK);
        rv = UpdateRecord(&binding->mRecord);
        NS_ENSURE_SUCCESS(rv, rv);

        nsCOMPtr<nsIFile> localFile;
        rv = GetLocalFileForDiskCacheRecord(&binding->mRecord,
                                            nsDiskCache::kMetaData,
                                            true,
                                            getter_AddRefs(localFile));
        NS_ENSURE_SUCCESS(rv, rv);

        // open the file
        PRFileDesc * fd;
        // open the file - restricted to user, the data could be confidential
        rv = localFile->OpenNSPRFileDesc(PR_RDWR | PR_TRUNCATE | PR_CREATE_FILE, 00600, &fd);
        NS_ENSURE_SUCCESS(rv, rv);

        // write the file
        int32_t bytesWritten = PR_Write(fd, diskEntry, size);

        PRStatus err = PR_Close(fd);
        if ((bytesWritten != (int32_t)size) || (err != PR_SUCCESS)) {
            return NS_ERROR_UNEXPECTED;
        }

        IncrementTotalSize(metaFileSizeK);
    }

    return rv;
}


nsresult
nsDiskCacheMap::ReadDataCacheBlocks(nsDiskCacheBinding * binding, char * buffer, uint32_t size)
{
    CACHE_LOG_DEBUG(("CACHE: ReadDataCacheBlocks [%x size=%u]\n",
        binding->mRecord.HashNumber(), size));

    uint32_t fileIndex = binding->mRecord.DataFile();
    int32_t  readSize  = size;

    nsresult rv = mBlockFile[fileIndex - 1].ReadBlocks(buffer,
                                                       binding->mRecord.DataStartBlock(),
                                                       binding->mRecord.DataBlockCount(),
                                                       &readSize);
    NS_ENSURE_SUCCESS(rv, rv);
    if (readSize < (int32_t)size) {
        rv = NS_ERROR_UNEXPECTED;
    }
    return rv;
}


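/**
 *  WriteDataCacheBlocks
 *
 *  Writes entry data into the smallest suitable block file, falling back to
 *  larger block sizes when a write fails, then updates the record with the
 *  blocks actually used (unless the binding is doomed).
 */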
nsresult
nsDiskCacheMap::WriteDataCacheBlocks(nsDiskCacheBinding * binding, char * buffer, uint32_t size)
{
    CACHE_LOG_DEBUG(("CACHE: WriteDataCacheBlocks [%x size=%u]\n",
        binding->mRecord.HashNumber(), size));

    mozilla::eventtracer::AutoEventTracer writeDataCacheBlocks(
        binding->mCacheEntry,
        mozilla::eventtracer::eExec,
        mozilla::eventtracer::eDone,
        "net::cache::WriteDataCacheBlocks");

    nsresult rv = NS_OK;

    // determine block file & number of blocks
    uint32_t fileIndex  = CalculateFileIndex(size);
    uint32_t blockCount = 0;
    int32_t  startBlock = 0;

    if (size > 0) {
        // if fileIndex is 0, bad things happen below, which makes gcc 4.7
        // complain, but it's not supposed to happen. See bug 854105.
        MOZ_ASSERT(fileIndex);
        while (fileIndex) {
            uint32_t blockSize = GetBlockSizeForIndex(fileIndex);
            blockCount = ((size - 1) / blockSize) + 1;

            rv = mBlockFile[fileIndex - 1].WriteBlocks(buffer, size, blockCount,
                                                       &startBlock);
            if (NS_SUCCEEDED(rv)) {
                IncrementTotalSize(blockCount, blockSize);
                break;
            }

            if (fileIndex == kNumBlockFiles)
                return rv;

            fileIndex++;
        }
    }

    // update binding and cache map record
    binding->mRecord.SetDataBlocks(fileIndex, startBlock, blockCount);
    if (!binding->mDoomed) {
        rv = UpdateRecord(&binding->mRecord);
    }
    return rv;
}


nsresult
nsDiskCacheMap::DeleteStorage(nsDiskCacheRecord * record)
{
    nsresult rv1 = DeleteStorage(record, nsDiskCache::kData);
    nsresult rv2 = DeleteStorage(record, nsDiskCache::kMetaData);
    return NS_FAILED(rv1) ? rv1 : rv2;
}


nsresult
nsDiskCacheMap::DeleteStorage(nsDiskCacheRecord * record, bool metaData)
{
    CACHE_LOG_DEBUG(("CACHE: DeleteStorage [%x %u]\n", record->HashNumber(),
        metaData));

    nsresult          rv = NS_ERROR_UNEXPECTED;
    uint32_t          fileIndex = metaData ? record->MetaFile() : record->DataFile();
    nsCOMPtr<nsIFile> file;

    if (fileIndex == 0) {
        // delete the file
        uint32_t sizeK = metaData ? record->MetaFileSize() : record->DataFileSize();
        // XXX if sizeK == USHRT_MAX, stat file for actual size

        rv = GetFileForDiskCacheRecord(record, metaData, false, getter_AddRefs(file));
        if (NS_SUCCEEDED(rv)) {
            rv = file->Remove(false);    // false == non-recursive
        }
        DecrementTotalSize(sizeK);

    } else if (fileIndex < (kNumBlockFiles + 1)) {
        // deallocate blocks
        uint32_t startBlock = metaData ? record->MetaStartBlock() : record->DataStartBlock();
        uint32_t blockCount = metaData ? record->MetaBlockCount() : record->DataBlockCount();

        rv = mBlockFile[fileIndex - 1].DeallocateBlocks(startBlock, blockCount);
        DecrementTotalSize(blockCount, GetBlockSizeForIndex(fileIndex));
    }
    if (metaData)  record->ClearMetaLocation();
    else           record->ClearDataLocation();

    return rv;
}


nsresult
nsDiskCacheMap::GetFileForDiskCacheRecord(nsDiskCacheRecord * record,
                                          bool                meta,
                                          bool                createPath,
                                          nsIFile **          result)
{
    if (!mCacheDirectory)  return NS_ERROR_NOT_AVAILABLE;

    nsCOMPtr<nsIFile> file;
    nsresult rv = mCacheDirectory->Clone(getter_AddRefs(file));
    if (NS_FAILED(rv))  return rv;

    uint32_t hash = record->HashNumber();

    // The file is stored under subdirectories according to the hash number:
    // 0x01234567 -> 0/12/
    rv = file->AppendNative(nsPrintfCString("%X", hash >> 28));
    if (NS_FAILED(rv))  return rv;
    rv = file->AppendNative(nsPrintfCString("%02X", (hash >> 20) & 0xFF));
    if (NS_FAILED(rv))  return rv;

    bool exists;
    if (createPath && (NS_FAILED(file->Exists(&exists)) || !exists)) {
        rv = file->Create(nsIFile::DIRECTORY_TYPE, 0700);
        if (NS_FAILED(rv))  return rv;
    }

    int16_t generation = record->Generation();
    char name[32];
    // Cut the beginning of the hash that was used in the path
    ::sprintf(name, "%05X%c%02X", hash & 0xFFFFF, (meta ? 'm' : 'd'),
              generation);
    rv = file->AppendNative(nsDependentCString(name));
    if (NS_FAILED(rv))  return rv;

    NS_IF_ADDREF(*result = file);
    return rv;
}


nsresult
nsDiskCacheMap::GetLocalFileForDiskCacheRecord(nsDiskCacheRecord * record,
                                               bool                meta,
                                               bool                createPath,
                                               nsIFile **          result)
{
    nsCOMPtr<nsIFile> file;
    nsresult rv = GetFileForDiskCacheRecord(record,
                                            meta,
                                            createPath,
                                            getter_AddRefs(file));
    if (NS_FAILED(rv))  return rv;

    NS_IF_ADDREF(*result = file);
    return rv;
}


nsresult
nsDiskCacheMap::GetBlockFileForIndex(uint32_t index, nsIFile ** result)
{
    if (!mCacheDirectory)  return NS_ERROR_NOT_AVAILABLE;

    nsCOMPtr<nsIFile> file;
    nsresult rv = mCacheDirectory->Clone(getter_AddRefs(file));
    if (NS_FAILED(rv))  return rv;

    char name[32];
    ::sprintf(name, "_CACHE_%03d_", index + 1);
    rv = file->AppendNative(nsDependentCString(name));
    if (NS_FAILED(rv))  return rv;

    NS_IF_ADDREF(*result = file);

    return rv;
}


uint32_t
nsDiskCacheMap::CalculateFileIndex(uint32_t size)
{
    // We prefer to use block file with larger block if the wasted space would
    // be the same. E.g. store entry with size of 3073 bytes in 1 4K-block
    // instead of in 4 1K-blocks.

    if (size <= 3 * BLOCK_SIZE_FOR_INDEX(1))  return 1;
    if (size <= 3 * BLOCK_SIZE_FOR_INDEX(2))  return 2;
    if (size <= 4 * BLOCK_SIZE_FOR_INDEX(3))  return 3;
    return 0;
}

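/**
 *  EnsureBuffer
 *
 *  Grows the shared scratch buffer to at least bufSize bytes; the buffer is
 *  reused for reading and flattening disk cache entries.
 */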
nsresult
nsDiskCacheMap::EnsureBuffer(uint32_t bufSize)
{
    if (mBufferSize < bufSize) {
        char * buf = (char *)PR_REALLOC(mBuffer, bufSize);
        if (!buf) {
            mBufferSize = 0;
            return NS_ERROR_OUT_OF_MEMORY;
        }
        mBuffer = buf;
        mBufferSize = bufSize;
    }
    return NS_OK;
}

void
nsDiskCacheMap::NotifyCapacityChange(uint32_t capacity)
{
    // Heuristic 1. average cache entry size is probably around 1KB
    // Heuristic 2. we don't want more than 32MB reserved to store the record
    //              map in memory.
    const int32_t RECORD_COUNT_LIMIT = 32 * 1024 * 1024 / sizeof(nsDiskCacheRecord);
    int32_t maxRecordCount = std::min(int32_t(capacity), RECORD_COUNT_LIMIT);
    if (mMaxRecordCount < maxRecordCount) {
        // We can only grow
        mMaxRecordCount = maxRecordCount;
    }
}

size_t
nsDiskCacheMap::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf)
{
    size_t usage = aMallocSizeOf(mRecordArray);

    usage += aMallocSizeOf(mBuffer);
    usage += aMallocSizeOf(mMapFD);
    usage += aMallocSizeOf(mCleanFD);
    usage += aMallocSizeOf(mCacheDirectory);
    usage += aMallocSizeOf(mCleanCacheTimer);

    for (int i = 0; i < kNumBlockFiles; i++) {
        usage += mBlockFile[i].SizeOfExcludingThis(aMallocSizeOf);
    }

    return usage;
}

nsresult
nsDiskCacheMap::InitCacheClean(nsIFile * cacheDirectory,
                               nsDiskCache::CorruptCacheInfo * corruptInfo,
                               bool reportCacheCleanTelemetryData)
{
    // The _CACHE_CLEAN_ file will be used in the future to determine
    // if the cache is clean or not.
    bool cacheCleanFileExists = false;
    nsCOMPtr<nsIFile> cacheCleanFile;
    nsresult rv = cacheDirectory->GetParent(getter_AddRefs(cacheCleanFile));
    if (NS_SUCCEEDED(rv)) {
        rv = cacheCleanFile->AppendNative(
                 NS_LITERAL_CSTRING("_CACHE_CLEAN_"));
        if (NS_SUCCEEDED(rv)) {
            // Check if the file already exists, if it does, we will later read the
            // value and report it to telemetry.
            cacheCleanFile->Exists(&cacheCleanFileExists);
        }
    }
    if (NS_FAILED(rv)) {
        NS_WARNING("Could not build cache clean file path");
        *corruptInfo = nsDiskCache::kCacheCleanFilePathError;
        return rv;
    }

    // Make sure the _CACHE_CLEAN_ file exists
    rv = cacheCleanFile->OpenNSPRFileDesc(PR_RDWR | PR_CREATE_FILE,
                                          00600, &mCleanFD);
    if (NS_FAILED(rv)) {
        NS_WARNING("Could not open cache clean file");
        *corruptInfo = nsDiskCache::kCacheCleanOpenFileError;
        return rv;
    }

    if (cacheCleanFileExists) {
        char clean = '0';
        int32_t bytesRead = PR_Read(mCleanFD, &clean, 1);
        if (bytesRead != 1) {
            NS_WARNING("Could not read _CACHE_CLEAN_ file contents");
        } else if (reportCacheCleanTelemetryData) {
            Telemetry::Accumulate(Telemetry::DISK_CACHE_REDUCTION_TRIAL,
                                  clean == '1' ? 1 : 0);
        }
    }

    // Create a timer that will be used to validate the cache
    // as long as an activity threshold was met
    mCleanCacheTimer = do_CreateInstance("@mozilla.org/timer;1", &rv);
    if (NS_SUCCEEDED(rv)) {
        mCleanCacheTimer->SetTarget(nsCacheService::GlobalInstance()->mCacheIOThread);
        rv = ResetCacheTimer();
    }

    if (NS_FAILED(rv)) {
        NS_WARNING("Could not create cache clean timer");
        mCleanCacheTimer = nullptr;
        *corruptInfo = nsDiskCache::kCacheCleanTimerError;
        return rv;
    }

    return NS_OK;
}

nsresult
nsDiskCacheMap::WriteCacheClean(bool clean)
{
    nsCacheService::AssertOwnsLock();
    if (!mCleanFD) {
        NS_WARNING("Cache clean file is not open!");
        return NS_ERROR_FAILURE;
    }

    CACHE_LOG_DEBUG(("CACHE: WriteCacheClean: %d\n", clean ? 1 : 0));
    // I'm using a simple '1' or '0' to denote cache clean
    // since it can be edited easily by any text editor for testing.
    char data = clean ? '1' : '0';
    int32_t filePos = PR_Seek(mCleanFD, 0, PR_SEEK_SET);
    if (filePos != 0) {
        NS_WARNING("Could not seek in cache clean file!");
        return NS_ERROR_FAILURE;
    }
    int32_t bytesWritten = PR_Write(mCleanFD, &data, 1);
    if (bytesWritten != 1) {
        NS_WARNING("Could not write cache clean file!");
        return NS_ERROR_FAILURE;
    }
    PRStatus err = PR_Sync(mCleanFD);
    if (err != PR_SUCCESS) {
        NS_WARNING("Could not flush cache clean file!");
    }

    return NS_OK;
}

nsresult
nsDiskCacheMap::InvalidateCache()
{
    nsCacheService::AssertOwnsLock();
    CACHE_LOG_DEBUG(("CACHE: InvalidateCache\n"));
    nsresult rv;

    if (!mIsDirtyCacheFlushed) {
        rv = WriteCacheClean(false);
        if (NS_FAILED(rv)) {
            Telemetry::Accumulate(Telemetry::DISK_CACHE_INVALIDATION_SUCCESS, 0);
            return rv;
        }

        Telemetry::Accumulate(Telemetry::DISK_CACHE_INVALIDATION_SUCCESS, 1);
        mIsDirtyCacheFlushed = true;
    }

    rv = ResetCacheTimer();
    NS_ENSURE_SUCCESS(rv, rv);

    return NS_OK;
}

nsresult
nsDiskCacheMap::ResetCacheTimer(int32_t timeout)
{
    mCleanCacheTimer->Cancel();
    nsresult rv =
        mCleanCacheTimer->InitWithFuncCallback(RevalidateTimerCallback,
                                               nullptr, timeout,
                                               nsITimer::TYPE_ONE_SHOT);
    NS_ENSURE_SUCCESS(rv, rv);
    mLastInvalidateTime = PR_IntervalNow();

    return rv;
}

void
nsDiskCacheMap::RevalidateTimerCallback(nsITimer *aTimer, void *arg)
{
    nsCacheServiceAutoLock lock(LOCK_TELEM(NSDISKCACHEMAP_REVALIDATION));
    if (!nsCacheService::gService->mDiskDevice ||
        !nsCacheService::gService->mDiskDevice->Initialized()) {
        return;
    }

    nsDiskCacheMap *diskCacheMap =
        &nsCacheService::gService->mDiskDevice->mCacheMap;

    // If we have less than kRevalidateCacheTimeout since the last timer was
    // issued then another thread called InvalidateCache. This won't catch
    // all cases where we wanted to cancel the timer, but under the lock it
    // is always OK to revalidate as long as IsCacheInSafeState() returns
    // true. We just want to avoid revalidating when we can to reduce IO
    // and this check will do that.
    uint32_t delta =
        PR_IntervalToMilliseconds(PR_IntervalNow() -
                                  diskCacheMap->mLastInvalidateTime) +
        kRevalidateCacheTimeoutTolerance;
    if (delta < kRevalidateCacheTimeout) {
        diskCacheMap->ResetCacheTimer();
        return;
    }

    nsresult rv = diskCacheMap->RevalidateCache();
    if (NS_FAILED(rv)) {
        diskCacheMap->ResetCacheTimer(kRevalidateCacheErrorTimeout);
    }
}

bool
nsDiskCacheMap::IsCacheInSafeState()
{
    return nsCacheService::GlobalInstance()->IsDoomListEmpty();
}

nsresult
nsDiskCacheMap::RevalidateCache()
{
    CACHE_LOG_DEBUG(("CACHE: RevalidateCache\n"));
    nsresult rv;

    if (!IsCacheInSafeState()) {
        Telemetry::Accumulate(Telemetry::DISK_CACHE_REVALIDATION_SAFE, 0);
        CACHE_LOG_DEBUG(("CACHE: Revalidation should not be performed because "
                         "the cache is not in a safe state\n"));
        // Normally we would return an error here, but there is a bug where
        // the doom list sometimes gets an entry 'stuck' and doesn't clear it
        // until browser shutdown. So we allow revalidation for the time being
        // to get proper telemetry data of how much the cache corruption plan
        // would help.
    } else {
        Telemetry::Accumulate(Telemetry::DISK_CACHE_REVALIDATION_SAFE, 1);
    }

    // We want this after the lock to prove that flushing a file isn't that expensive
    Telemetry::AutoTimer<Telemetry::NETWORK_DISK_CACHE_REVALIDATION> totalTimer;

    // If telemetry data shows it is worth it, we'll be flushing headers and
    // records before flushing the clean cache file.

    // Write out the _CACHE_CLEAN_ file with '1'
    rv = WriteCacheClean(true);
    if (NS_FAILED(rv)) {
        Telemetry::Accumulate(Telemetry::DISK_CACHE_REVALIDATION_SUCCESS, 0);
        return rv;
    }

    Telemetry::Accumulate(Telemetry::DISK_CACHE_REVALIDATION_SUCCESS, 1);
    mIsDirtyCacheFlushed = false;

    return NS_OK;
}