Fri, 16 Jan 2015 18:13:44 +0100
Integrate suggestion from review to improve consistency with existing code.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "Classifier.h"
#include "nsIPrefBranch.h"
#include "nsIPrefService.h"
#include "nsISimpleEnumerator.h"
#include "nsIRandomGenerator.h"
#include "nsIInputStream.h"
#include "nsISeekableStream.h"
#include "nsIFile.h"
#include "nsAutoPtr.h"
#include "mozilla/Telemetry.h"
#include "prlog.h"

// NSPR_LOG_MODULES=UrlClassifierDbService:5
extern PRLogModuleInfo *gUrlClassifierDbServiceLog;
#if defined(PR_LOGGING)
#define LOG(args) PR_LOG(gUrlClassifierDbServiceLog, PR_LOG_DEBUG, args)
#define LOG_ENABLED() PR_LOG_TEST(gUrlClassifierDbServiceLog, 4)
#else
#define LOG(args)
#define LOG_ENABLED() (false)
#endif

#define STORE_DIRECTORY NS_LITERAL_CSTRING("safebrowsing")
#define TO_DELETE_DIR_SUFFIX NS_LITERAL_CSTRING("-to_delete")
#define BACKUP_DIR_SUFFIX NS_LITERAL_CSTRING("-backup")

namespace mozilla {
namespace safebrowsing {
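
// Split a comma-separated list of table names into individual entries,
// skipping empty ones, e.g. (illustrative) "tableA,,tableB" yields
// ["tableA", "tableB"].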
void
Classifier::SplitTables(const nsACString& str, nsTArray<nsCString>& tables)
{
  tables.Clear();

  nsACString::const_iterator begin, iter, end;
  str.BeginReading(begin);
  str.EndReading(end);
  while (begin != end) {
    iter = begin;
    FindCharInReadable(',', iter, end);
    nsDependentCSubstring table = Substring(begin, iter);
    if (!table.IsEmpty()) {
      tables.AppendElement(Substring(begin, iter));
    }
    begin = iter;
    if (begin != end) {
      begin++;
    }
  }
}

Classifier::Classifier()
  : mFreshTime(45 * 60)
{
}

Classifier::~Classifier()
{
  Close();
}

nsresult
Classifier::SetupPathNames()
{
  // Get the root directory where to store all the databases.
  nsresult rv = mCacheDirectory->Clone(getter_AddRefs(mStoreDirectory));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mStoreDirectory->AppendNative(STORE_DIRECTORY);
  NS_ENSURE_SUCCESS(rv, rv);

  // Make sure LookupCaches (which are persistent and survive updates)
  // are reading/writing in the right place. We will be moving their
  // files "underneath" them during backup/restore.
  for (uint32_t i = 0; i < mLookupCaches.Length(); i++) {
    mLookupCaches[i]->UpdateDirHandle(mStoreDirectory);
  }

  // Directory where to move a backup before an update.
  rv = mCacheDirectory->Clone(getter_AddRefs(mBackupDirectory));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mBackupDirectory->AppendNative(STORE_DIRECTORY + BACKUP_DIR_SUFFIX);
  NS_ENSURE_SUCCESS(rv, rv);

  // Directory where to move the backup so we can atomically
  // delete (really move) it.
  rv = mCacheDirectory->Clone(getter_AddRefs(mToDeleteDirectory));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mToDeleteDirectory->AppendNative(STORE_DIRECTORY + TO_DELETE_DIR_SUFFIX);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

nsresult
Classifier::CreateStoreDirectory()
{
  // Ensure the safebrowsing directory exists.
  bool storeExists;
  nsresult rv = mStoreDirectory->Exists(&storeExists);
  NS_ENSURE_SUCCESS(rv, rv);

  if (!storeExists) {
    rv = mStoreDirectory->Create(nsIFile::DIRECTORY_TYPE, 0755);
    NS_ENSURE_SUCCESS(rv, rv);
  } else {
    bool storeIsDir;
    rv = mStoreDirectory->IsDirectory(&storeIsDir);
    NS_ENSURE_SUCCESS(rv, rv);
    if (!storeIsDir)
      return NS_ERROR_FILE_DESTINATION_NOT_DIR;
  }

  return NS_OK;
}

nsresult
Classifier::Open(nsIFile& aCacheDirectory)
{
  // Remember the Local profile directory.
  nsresult rv = aCacheDirectory.Clone(getter_AddRefs(mCacheDirectory));
  NS_ENSURE_SUCCESS(rv, rv);

  // Create the handles to the update and backup directories.
  rv = SetupPathNames();
  NS_ENSURE_SUCCESS(rv, rv);

  // Clean up any to-delete directories that haven't been deleted yet.
  rv = CleanToDelete();
  NS_ENSURE_SUCCESS(rv, rv);

  // Check whether we have an incomplete update and recover from the
  // backup if so.
  rv = RecoverBackups();
  NS_ENSURE_SUCCESS(rv, rv);

  // Make sure the main store directory exists.
  rv = CreateStoreDirectory();
  NS_ENSURE_SUCCESS(rv, rv);

  mCryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID, &rv);
  NS_ENSURE_SUCCESS(rv, rv);

  // Build the list of known urlclassifier lists
  // XXX: Disk IO potentially on the main thread during startup
  RegenActiveTables();

  return NS_OK;
}

void
Classifier::Close()
{
  DropStores();
}

void
Classifier::Reset()
{
  DropStores();

  mStoreDirectory->Remove(true);
  mBackupDirectory->Remove(true);
  mToDeleteDirectory->Remove(true);

  CreateStoreDirectory();

  mTableFreshness.Clear();
  RegenActiveTables();
}
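
// Serialize the chunk state of each active table into aResult, one line per
// table, in the shape produced by the appends below:
//   <tableName>;a:<add chunk ranges>:s:<sub chunk ranges>\n
// e.g. (illustrative) "test-phish-simple;a:1-3,5:s:2\n". Tables whose store
// cannot be opened are skipped.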
void
Classifier::TableRequest(nsACString& aResult)
{
  nsTArray<nsCString> tables;
  ActiveTables(tables);
  for (uint32_t i = 0; i < tables.Length(); i++) {
    nsAutoPtr<HashStore> store(new HashStore(tables[i], mStoreDirectory));
    if (!store)
      continue;

    nsresult rv = store->Open();
    if (NS_FAILED(rv))
      continue;

    aResult.Append(store->TableName());
    aResult.Append(";");

    ChunkSet &adds = store->AddChunks();
    ChunkSet &subs = store->SubChunks();

    if (adds.Length() > 0) {
      aResult.Append("a:");
      nsAutoCString addList;
      adds.Serialize(addList);
      aResult.Append(addList);
    }

    if (subs.Length() > 0) {
      if (adds.Length() > 0)
        aResult.Append(':');
      aResult.Append("s:");
      nsAutoCString subList;
      subs.Serialize(subList);
      aResult.Append(subList);
    }

    aResult.Append('\n');
  }
}
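
// Check aSpec against the given tables. Each lookup fragment derived from
// the URL is hashed and tested against every requested table's LookupCache;
// matches are appended to aResults, flagged with whether the hash is a full
// completion and whether the table was updated within mFreshTime.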
nsresult
Classifier::Check(const nsACString& aSpec,
                  const nsACString& aTables,
                  LookupResultArray& aResults)
{
  Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_CL_CHECK_TIME> timer;

  // Get the set of fragments based on the url. This is necessary because we
  // only look up at most 5 URLs per aSpec, even if aSpec has more than 5
  // components.
  nsTArray<nsCString> fragments;
  nsresult rv = LookupCache::GetLookupFragments(aSpec, &fragments);
  NS_ENSURE_SUCCESS(rv, rv);

  nsTArray<nsCString> activeTables;
  SplitTables(aTables, activeTables);

  nsTArray<LookupCache*> cacheArray;
  for (uint32_t i = 0; i < activeTables.Length(); i++) {
    LOG(("Checking table %s", activeTables[i].get()));
    LookupCache *cache = GetLookupCache(activeTables[i]);
    if (cache) {
      cacheArray.AppendElement(cache);
    } else {
      return NS_ERROR_FAILURE;
    }
  }

  // Now check each lookup fragment against the entries in the DB.
  for (uint32_t i = 0; i < fragments.Length(); i++) {
    Completion lookupHash;
    lookupHash.FromPlaintext(fragments[i], mCryptoHash);

    // Get list of host keys to look up
    Completion hostKey;
    rv = LookupCache::GetKey(fragments[i], &hostKey, mCryptoHash);
    if (NS_FAILED(rv)) {
      // Local host on the network.
      continue;
    }

#if defined(DEBUG) && defined(PR_LOGGING)
    if (LOG_ENABLED()) {
      nsAutoCString checking;
      lookupHash.ToHexString(checking);
      LOG(("Checking fragment %s, hash %s (%X)", fragments[i].get(),
           checking.get(), lookupHash.ToUint32()));
    }
#endif
    for (uint32_t j = 0; j < cacheArray.Length(); j++) {
      LookupCache *cache = cacheArray[j];
      bool has, complete;
      rv = cache->Has(lookupHash, &has, &complete);
      NS_ENSURE_SUCCESS(rv, rv);
      if (has) {
        LookupResult *result = aResults.AppendElement();
        if (!result)
          return NS_ERROR_OUT_OF_MEMORY;

        int64_t age;
        bool found = mTableFreshness.Get(cache->TableName(), &age);
        if (!found) {
          age = 24 * 60 * 60; // just a large number
        } else {
          int64_t now = (PR_Now() / PR_USEC_PER_SEC);
          age = now - age;
        }

        LOG(("Found a result in %s: %s (Age: %Lds)",
             cache->TableName().get(),
             complete ? "complete." : "Not complete.",
             age));

        result->hash.complete = lookupHash;
        result->mComplete = complete;
        result->mFresh = (age < mFreshTime);
        result->mTableName.Assign(cache->TableName());
      }
    }

  }

  return NS_OK;
}
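
// Apply a set of table updates as one transaction: back up the store
// directory, apply the updates table by table, then remove the backup.
// Any failure other than out-of-memory wipes the store via Reset() so we
// never keep a half-updated database.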
nsresult
Classifier::ApplyUpdates(nsTArray<TableUpdate*>* aUpdates)
{
  Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_CL_UPDATE_TIME> timer;

#if defined(PR_LOGGING)
  PRIntervalTime clockStart = 0;
  if (LOG_ENABLED()) {
    clockStart = PR_IntervalNow();
  }
#endif

  LOG(("Backup before update."));

  nsresult rv = BackupTables();
  NS_ENSURE_SUCCESS(rv, rv);

  LOG(("Applying table updates."));

  for (uint32_t i = 0; i < aUpdates->Length(); i++) {
    // Previous ApplyTableUpdates() may have consumed this update.
    if ((*aUpdates)[i]) {
      // Run all updates for one table
      nsCString updateTable(aUpdates->ElementAt(i)->TableName());
      rv = ApplyTableUpdates(aUpdates, updateTable);
      if (NS_FAILED(rv)) {
        if (rv != NS_ERROR_OUT_OF_MEMORY) {
          Reset();
        }
        return rv;
      }
    }
  }
  aUpdates->Clear();

  rv = RegenActiveTables();
  NS_ENSURE_SUCCESS(rv, rv);

  LOG(("Cleaning up backups."));

  // Move the backup directory away (signaling the transaction finished
  // successfully). This is atomic.
  rv = RemoveBackupTables();
  NS_ENSURE_SUCCESS(rv, rv);

  // Do the actual deletion of the backup files.
  rv = CleanToDelete();
  NS_ENSURE_SUCCESS(rv, rv);

  LOG(("Done applying updates."));

#if defined(PR_LOGGING)
  if (LOG_ENABLED()) {
    PRIntervalTime clockEnd = PR_IntervalNow();
    LOG(("update took %dms\n",
         PR_IntervalToMilliseconds(clockEnd - clockStart)));
  }
#endif

  return NS_OK;
}
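
// Mark the given tables as having no known freshness and drop any cached
// Completes for them, so later lookups in these tables are not reported
// as fresh.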
nsresult
Classifier::MarkSpoiled(nsTArray<nsCString>& aTables)
{
  for (uint32_t i = 0; i < aTables.Length(); i++) {
    LOG(("Spoiling table: %s", aTables[i].get()));
    // Spoil this table by marking it as no known freshness
    mTableFreshness.Remove(aTables[i]);
    // Remove any cached Completes for this table
    LookupCache *cache = GetLookupCache(aTables[i]);
    if (cache) {
      cache->ClearCompleteCache();
    }
  }
  return NS_OK;
}

void
Classifier::DropStores()
{
  for (uint32_t i = 0; i < mHashStores.Length(); i++) {
    delete mHashStores[i];
  }
  mHashStores.Clear();
  for (uint32_t i = 0; i < mLookupCaches.Length(); i++) {
    delete mLookupCaches[i];
  }
  mLookupCaches.Clear();
}
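
// Rebuild mActiveTablesCache by scanning the store directory: a table is
// considered active when its store opens, its LookupCache is primed, and
// it has at least one add or sub chunk.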
nsresult
Classifier::RegenActiveTables()
{
  mActiveTablesCache.Clear();

  nsTArray<nsCString> foundTables;
  ScanStoreDir(foundTables);

  for (uint32_t i = 0; i < foundTables.Length(); i++) {
    nsAutoPtr<HashStore> store(new HashStore(nsCString(foundTables[i]), mStoreDirectory));
    if (!store)
      return NS_ERROR_OUT_OF_MEMORY;

    nsresult rv = store->Open();
    if (NS_FAILED(rv))
      continue;

    LookupCache *lookupCache = GetLookupCache(store->TableName());
    if (!lookupCache) {
      continue;
    }

    if (!lookupCache->IsPrimed())
      continue;

    const ChunkSet &adds = store->AddChunks();
    const ChunkSet &subs = store->SubChunks();

    if (adds.Length() == 0 && subs.Length() == 0)
      continue;

    LOG(("Active table: %s", store->TableName().get()));
    mActiveTablesCache.AppendElement(store->TableName());
  }

  return NS_OK;
}
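
// Collect the table names present on disk by enumerating the store
// directory and stripping the ".sbstore" suffix from matching file names.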
nsresult
Classifier::ScanStoreDir(nsTArray<nsCString>& aTables)
{
  nsCOMPtr<nsISimpleEnumerator> entries;
  nsresult rv = mStoreDirectory->GetDirectoryEntries(getter_AddRefs(entries));
  NS_ENSURE_SUCCESS(rv, rv);

  bool hasMore;
  while (NS_SUCCEEDED(rv = entries->HasMoreElements(&hasMore)) && hasMore) {
    nsCOMPtr<nsISupports> supports;
    rv = entries->GetNext(getter_AddRefs(supports));
    NS_ENSURE_SUCCESS(rv, rv);

    nsCOMPtr<nsIFile> file = do_QueryInterface(supports);

    nsCString leafName;
    rv = file->GetNativeLeafName(leafName);
    NS_ENSURE_SUCCESS(rv, rv);

    nsCString suffix(NS_LITERAL_CSTRING(".sbstore"));

    int32_t dot = leafName.RFind(suffix, 0);
    if (dot != -1) {
      leafName.Cut(dot, suffix.Length());
      aTables.AppendElement(leafName);
    }
  }
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

nsresult
Classifier::ActiveTables(nsTArray<nsCString>& aTables)
{
  aTables = mActiveTablesCache;
  return NS_OK;
}

nsresult
Classifier::CleanToDelete()
{
  bool exists;
  nsresult rv = mToDeleteDirectory->Exists(&exists);
  NS_ENSURE_SUCCESS(rv, rv);

  if (exists) {
    rv = mToDeleteDirectory->Remove(true);
    NS_ENSURE_SUCCESS(rv, rv);
  }

  return NS_OK;
}

nsresult
Classifier::BackupTables()
{
  // We have to work in reverse here: first move the normal directory
  // away to be the backup directory, then copy the files over
  // to the normal directory. This ensures that if we crash, the backup
  // dir always has a valid, complete copy, instead of a partial one,
  // because that's the one we will copy over the normal store dir.

  nsCString backupDirName;
  nsresult rv = mBackupDirectory->GetNativeLeafName(backupDirName);
  NS_ENSURE_SUCCESS(rv, rv);

  nsCString storeDirName;
  rv = mStoreDirectory->GetNativeLeafName(storeDirName);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mStoreDirectory->MoveToNative(nullptr, backupDirName);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mStoreDirectory->CopyToNative(nullptr, storeDirName);
  NS_ENSURE_SUCCESS(rv, rv);

  // We moved some things to new places, so move the handles around, too.
  rv = SetupPathNames();
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

nsresult
Classifier::RemoveBackupTables()
{
  nsCString toDeleteName;
  nsresult rv = mToDeleteDirectory->GetNativeLeafName(toDeleteName);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mBackupDirectory->MoveToNative(nullptr, toDeleteName);
  NS_ENSURE_SUCCESS(rv, rv);

  // mBackupDirectory now points to toDelete, fix that up.
  rv = SetupPathNames();
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

nsresult
Classifier::RecoverBackups()
{
  bool backupExists;
  nsresult rv = mBackupDirectory->Exists(&backupExists);
  NS_ENSURE_SUCCESS(rv, rv);

  if (backupExists) {
    // Remove the safebrowsing dir if it exists
    nsCString storeDirName;
    rv = mStoreDirectory->GetNativeLeafName(storeDirName);
    NS_ENSURE_SUCCESS(rv, rv);

    bool storeExists;
    rv = mStoreDirectory->Exists(&storeExists);
    NS_ENSURE_SUCCESS(rv, rv);

    if (storeExists) {
      rv = mStoreDirectory->Remove(true);
      NS_ENSURE_SUCCESS(rv, rv);
    }

    // Move the backup to the store location
    rv = mBackupDirectory->MoveToNative(nullptr, storeDirName);
    NS_ENSURE_SUCCESS(rv, rv);

    // mBackupDirectory now points to storeDir, fix up.
    rv = SetupPathNames();
    NS_ENSURE_SUCCESS(rv, rv);
  }

  return NS_OK;
}

/*
 * This will consume+delete updates from the passed nsTArray.
 */
nsresult
Classifier::ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
                              const nsACString& aTable)
{
  LOG(("Classifier::ApplyTableUpdates(%s)", PromiseFlatCString(aTable).get()));

  nsAutoPtr<HashStore> store(new HashStore(aTable, mStoreDirectory));

  if (!store)
    return NS_ERROR_FAILURE;

  // take the quick exit if there is no valid update for us
  // (common case)
  uint32_t validupdates = 0;

  for (uint32_t i = 0; i < aUpdates->Length(); i++) {
    TableUpdate *update = aUpdates->ElementAt(i);
    if (!update || !update->TableName().Equals(store->TableName()))
      continue;
    if (update->Empty()) {
      aUpdates->ElementAt(i) = nullptr;
      delete update;
      continue;
    }
    validupdates++;
  }

  if (!validupdates) {
    // This can happen if the update was only valid for one table.
    return NS_OK;
  }

  nsresult rv = store->Open();
  NS_ENSURE_SUCCESS(rv, rv);
  rv = store->BeginUpdate();
  NS_ENSURE_SUCCESS(rv, rv);

  // Read the part of the store that is (only) in the cache
  LookupCache *prefixSet = GetLookupCache(store->TableName());
  if (!prefixSet) {
    return NS_ERROR_FAILURE;
  }
  nsTArray<uint32_t> AddPrefixHashes;
  rv = prefixSet->GetPrefixes(&AddPrefixHashes);
  NS_ENSURE_SUCCESS(rv, rv);
  rv = store->AugmentAdds(AddPrefixHashes);
  NS_ENSURE_SUCCESS(rv, rv);
  AddPrefixHashes.Clear();

  uint32_t applied = 0;
  bool updateFreshness = false;
  bool hasCompletes = false;

  for (uint32_t i = 0; i < aUpdates->Length(); i++) {
    TableUpdate *update = aUpdates->ElementAt(i);
    if (!update || !update->TableName().Equals(store->TableName()))
      continue;

    rv = store->ApplyUpdate(*update);
    NS_ENSURE_SUCCESS(rv, rv);

    applied++;

    LOG(("Applied update to table %s:", store->TableName().get()));
    LOG((" %d add chunks", update->AddChunks().Length()));
    LOG((" %d add prefixes", update->AddPrefixes().Length()));
    LOG((" %d add completions", update->AddCompletes().Length()));
    LOG((" %d sub chunks", update->SubChunks().Length()));
    LOG((" %d sub prefixes", update->SubPrefixes().Length()));
    LOG((" %d sub completions", update->SubCompletes().Length()));
    LOG((" %d add expirations", update->AddExpirations().Length()));
    LOG((" %d sub expirations", update->SubExpirations().Length()));

    if (!update->IsLocalUpdate()) {
      updateFreshness = true;
      LOG(("Remote update, updating freshness"));
    }

    if (update->AddCompletes().Length() > 0
        || update->SubCompletes().Length() > 0) {
      hasCompletes = true;
      LOG(("Contains Completes, keeping cache."));
    }

    aUpdates->ElementAt(i) = nullptr;
    delete update;
  }

  LOG(("Applied %d update(s) to %s.", applied, store->TableName().get()));

  rv = store->Rebuild();
  NS_ENSURE_SUCCESS(rv, rv);

  // Not an update with Completes, clear all completes data.
  if (!hasCompletes) {
    store->ClearCompletes();
  }

  LOG(("Table %s now has:", store->TableName().get()));
  LOG((" %d add chunks", store->AddChunks().Length()));
  LOG((" %d add prefixes", store->AddPrefixes().Length()));
  LOG((" %d add completions", store->AddCompletes().Length()));
  LOG((" %d sub chunks", store->SubChunks().Length()));
  LOG((" %d sub prefixes", store->SubPrefixes().Length()));
  LOG((" %d sub completions", store->SubCompletes().Length()));

  rv = store->WriteFile();
  NS_ENSURE_SUCCESS(rv, rv);

  // At this point the store is updated and written out to disk, but
  // the data is still in memory. Build our quick-lookup table here.
  rv = prefixSet->Build(store->AddPrefixes(), store->AddCompletes());
  NS_ENSURE_SUCCESS(rv, rv);

#if defined(DEBUG) && defined(PR_LOGGING)
  prefixSet->Dump();
#endif
  rv = prefixSet->WriteFile();
  NS_ENSURE_SUCCESS(rv, rv);

  if (updateFreshness) {
    int64_t now = (PR_Now() / PR_USEC_PER_SEC);
    LOG(("Successfully updated %s", store->TableName().get()));
    mTableFreshness.Put(store->TableName(), now);
  }

  return NS_OK;
}
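
// Return the LookupCache for aTable, creating and opening it on first use.
// A corrupted cache file wipes the whole store via Reset(); other failures
// return nullptr.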
LookupCache *
Classifier::GetLookupCache(const nsACString& aTable)
{
  for (uint32_t i = 0; i < mLookupCaches.Length(); i++) {
    if (mLookupCaches[i]->TableName().Equals(aTable)) {
      return mLookupCaches[i];
    }
  }

  LookupCache *cache = new LookupCache(aTable, mStoreDirectory);
  nsresult rv = cache->Init();
  if (NS_FAILED(rv)) {
    delete cache;
    return nullptr;
  }
  rv = cache->Open();
  if (NS_FAILED(rv)) {
    if (rv == NS_ERROR_FILE_CORRUPTED) {
      Reset();
    }
    delete cache;
    return nullptr;
  }
  mLookupCaches.AppendElement(cache);
  return cache;
}
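
// Collect up to aCount prefixes adjacent to aPrefix in aTableName's sorted
// prefix array. These "noise" entries accompany the real prefix in gethash
// (completion) requests so the request does not reveal exactly which prefix
// was hit.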
nsresult
Classifier::ReadNoiseEntries(const Prefix& aPrefix,
                             const nsACString& aTableName,
                             uint32_t aCount,
                             PrefixArray* aNoiseEntries)
{
  LookupCache *cache = GetLookupCache(aTableName);
  if (!cache) {
    return NS_ERROR_FAILURE;
  }

  nsTArray<uint32_t> prefixes;
  nsresult rv = cache->GetPrefixes(&prefixes);
  NS_ENSURE_SUCCESS(rv, rv);

  uint32_t idx = prefixes.BinaryIndexOf(aPrefix.ToUint32());

  if (idx == nsTArray<uint32_t>::NoIndex) {
    NS_WARNING("Could not find prefix in PrefixSet during noise lookup");
    return NS_ERROR_FAILURE;
  }

  idx -= idx % aCount;

  for (uint32_t i = 0; (i < aCount) && ((idx+i) < prefixes.Length()); i++) {
    Prefix newPref;
    newPref.FromUint32(prefixes[idx+i]);
    if (newPref != aPrefix) {
      aNoiseEntries->AppendElement(newPref);
    }
  }

  return NS_OK;
}

} // namespace safebrowsing
} // namespace mozilla