//* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
// Originally based on Chrome sources:
// Copyright (c) 2010 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "HashStore.h"
#include "nsICryptoHash.h"
#include "nsISeekableStream.h"
#include "nsIStreamConverterService.h"
#include "nsNetUtil.h"
#include "nsCheckSummedOutputStream.h"
#include "prio.h"
#include "mozilla/Logging.h"
#include "zlib.h"
#include "Classifier.h"
#include "nsUrlClassifierDBService.h"

// Main store for SafeBrowsing protocol data. We store
// known add/sub chunks, prefixes and completions in memory
// during an update, and serialize to disk.
// We do not store the add prefixes; those are retrieved by
// decompressing the PrefixSet cache whenever we need to apply
// an update.
//
// Byte slicing: many of the 4-byte values stored here are strongly
// correlated in the upper bytes and uncorrelated in the lower
// bytes. Because zlib/DEFLATE requires match lengths of at least
// 3 to achieve good compression, and we don't get those if only
// the upper 16 bits are correlated, it is worthwhile to slice 32-bit
// values into four 1-byte slices and compress the slices individually.
// The slices corresponding to the MSBs will compress very well, and the
// slice corresponding to the LSB almost not at all. Because of this, we
// only apply DEFLATE to the 3 most significant bytes, and store the
// LSB uncompressed.
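// For example, under this scheme the 32-bit value 0x12345678 contributes
// byte 0x12 to the first (most significant) slice, 0x34 to the second,
// 0x56 to the third, and 0x78 to the final, uncompressed LSB slice.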
//
// Byte sliced (numValues) data format:
//   uint32_t compressed-size
//   compressed-size bytes    zlib DEFLATE data
//     0...numValues          byte MSB of 4-byte numValues data
//   uint32_t compressed-size
//   compressed-size bytes    zlib DEFLATE data
//     0...numValues          byte 2nd byte of 4-byte numValues data
//   uint32_t compressed-size
//   compressed-size bytes    zlib DEFLATE data
//     0...numValues          byte 3rd byte of 4-byte numValues data
//   0...numValues            byte LSB of 4-byte numValues data
//
// Store data format:
//   uint32_t magic
//   uint32_t version
//   uint32_t numAddChunks
//   uint32_t numSubChunks
//   uint32_t numAddPrefixes
//   uint32_t numSubPrefixes
//   uint32_t numAddCompletes
//   uint32_t numSubCompletes
//   0...numAddChunks               uint32_t addChunk
//   0...numSubChunks               uint32_t subChunk
//   byte sliced (numAddPrefixes)   uint32_t add chunk of AddPrefixes
//   byte sliced (numSubPrefixes)   uint32_t add chunk of SubPrefixes
//   byte sliced (numSubPrefixes)   uint32_t sub chunk of SubPrefixes
//   byte sliced (numSubPrefixes)   uint32_t SubPrefixes
//   0...numAddCompletes            32-byte Completions + uint32_t addChunk
//   0...numSubCompletes            32-byte Completions + uint32_t addChunk
//                                                      + uint32_t subChunk
//   16-byte MD5 of all preceding data

// Name of the SafeBrowsing store
#define STORE_SUFFIX ".sbstore"

// MOZ_LOG=UrlClassifierDbService:5
extern mozilla::LazyLogModule gUrlClassifierDbServiceLog;
#define LOG(args) MOZ_LOG(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug, args)
#define LOG_ENABLED() MOZ_LOG_TEST(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug)

// Either the return was successful or we call the Reset function (unless we
// hit an OOM). Used while reading in the store.
#define SUCCESS_OR_RESET(res)                                      \
  do {                                                             \
    nsresult __rv = res; /* Don't evaluate |res| more than once */ \
    if (__rv == NS_ERROR_OUT_OF_MEMORY) {                          \
      NS_WARNING("SafeBrowsing OOM.");                             \
      return __rv;                                                 \
    }                                                              \
    if (NS_FAILED(__rv)) {                                         \
      NS_WARNING("SafeBrowsing store corrupted or out of date.");  \
      Reset();                                                     \
      return __rv;                                                 \
    }                                                              \
  } while (0)
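
// Note that SUCCESS_OR_RESET expands to a call to Reset(), so it is only
// usable inside HashStore member functions, e.g.
// SUCCESS_OR_RESET(ReadHeader()) in Open() below.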
namespace mozilla {
namespace safebrowsing {

const uint32_t STORE_MAGIC = 0x1231af3b;
const uint32_t CURRENT_VERSION = 3;

nsresult
TableUpdateV2::NewAddPrefix(uint32_t aAddChunk, const Prefix& aHash)
{
  AddPrefix *add = mAddPrefixes.AppendElement(fallible);
  if (!add) return NS_ERROR_OUT_OF_MEMORY;
  add->addChunk = aAddChunk;
  add->prefix = aHash;
  return NS_OK;
}

nsresult
TableUpdateV2::NewSubPrefix(uint32_t aAddChunk, const Prefix& aHash, uint32_t aSubChunk)
{
  SubPrefix *sub = mSubPrefixes.AppendElement(fallible);
  if (!sub) return NS_ERROR_OUT_OF_MEMORY;
  sub->addChunk = aAddChunk;
  sub->prefix = aHash;
  sub->subChunk = aSubChunk;
  return NS_OK;
}

nsresult
TableUpdateV2::NewAddComplete(uint32_t aAddChunk, const Completion& aHash)
{
  AddComplete *add = mAddCompletes.AppendElement(fallible);
  if (!add) return NS_ERROR_OUT_OF_MEMORY;
  add->addChunk = aAddChunk;
  add->complete = aHash;
  return NS_OK;
}

nsresult
TableUpdateV2::NewSubComplete(uint32_t aAddChunk, const Completion& aHash, uint32_t aSubChunk)
{
  SubComplete *sub = mSubCompletes.AppendElement(fallible);
  if (!sub) return NS_ERROR_OUT_OF_MEMORY;
  sub->addChunk = aAddChunk;
  sub->complete = aHash;
  sub->subChunk = aSubChunk;
  return NS_OK;
}

void
TableUpdateV4::NewPrefixes(int32_t aSize, std::string& aPrefixes)
{
  NS_ENSURE_TRUE_VOID(aPrefixes.size() % aSize == 0);
  NS_ENSURE_TRUE_VOID(!mPrefixesMap.Get(aSize));

  if (LOG_ENABLED() && 4 == aSize) {
    int numOfPrefixes = aPrefixes.size() / 4;
    uint32_t* p = (uint32_t*)aPrefixes.c_str();

    // Dump the first/last 10 fixed-length prefixes for debugging.
    LOG(("* The first 10 (maximum) fixed-length prefixes: "));
    for (int i = 0; i < std::min(10, numOfPrefixes); i++) {
      uint8_t* c = (uint8_t*)&p[i];
      LOG(("%.2X%.2X%.2X%.2X", c[0], c[1], c[2], c[3]));
    }

    LOG(("* The last 10 (maximum) fixed-length prefixes: "));
    for (int i = std::max(0, numOfPrefixes - 10); i < numOfPrefixes; i++) {
      uint8_t* c = (uint8_t*)&p[i];
      LOG(("%.2X%.2X%.2X%.2X", c[0], c[1], c[2], c[3]));
    }

    LOG(("---- %d fixed-length prefixes in total.", aPrefixes.size() / aSize));
  }

  PrefixStdString* prefix = new PrefixStdString(aPrefixes);
  mPrefixesMap.Put(aSize, prefix);
}

void
TableUpdateV4::NewRemovalIndices(const uint32_t* aIndices, size_t aNumOfIndices)
{
  for (size_t i = 0; i < aNumOfIndices; i++) {
    mRemovalIndiceArray.AppendElement(aIndices[i]);
  }
}

void
TableUpdateV4::NewChecksum(const std::string& aChecksum)
{
  mChecksum.Assign(aChecksum.data(), aChecksum.size());
}

HashStore::HashStore(const nsACString& aTableName,
                     const nsACString& aProvider,
                     nsIFile* aRootStoreDir)
  : mTableName(aTableName)
  , mInUpdate(false)
  , mFileSize(0)
{
  nsresult rv = Classifier::GetPrivateStoreDirectory(aRootStoreDir,
                                                     aTableName,
                                                     aProvider,
                                                     getter_AddRefs(mStoreDirectory));
  if (NS_FAILED(rv)) {
    LOG(("Failed to get private store directory for %s", mTableName.get()));
    mStoreDirectory = aRootStoreDir;
  }
}

HashStore::~HashStore()
{
}
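
// Remove the backing .sbstore file and forget its size. This is called via
// SUCCESS_OR_RESET when the on-disk store appears corrupted or out of date;
// a later update rewrites the file from the in-memory data.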
nsresult
HashStore::Reset()
{
  LOG(("HashStore resetting"));

  nsCOMPtr<nsIFile> storeFile;
  nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(STORE_SUFFIX));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = storeFile->Remove(false);
  NS_ENSURE_SUCCESS(rv, rv);

  mFileSize = 0;

  return NS_OK;
}

nsresult
HashStore::CheckChecksum(uint32_t aFileSize)
{
  if (!mInputStream) {
    return NS_OK;
  }

  // Check for file corruption by comparing the stored checksum
  // to the actual checksum of the data.
  nsAutoCString hash;
  nsAutoCString compareHash;
  char *data;
  uint32_t read;

  nsresult rv = CalculateChecksum(hash, aFileSize, true);
  NS_ENSURE_SUCCESS(rv, rv);

  compareHash.GetMutableData(&data, hash.Length());

  if (hash.Length() > aFileSize) {
    NS_WARNING("SafeBrowsing file not long enough to store its hash");
    return NS_ERROR_FAILURE;
  }
  nsCOMPtr<nsISeekableStream> seekIn = do_QueryInterface(mInputStream);
  rv = seekIn->Seek(nsISeekableStream::NS_SEEK_SET, aFileSize - hash.Length());
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mInputStream->Read(data, hash.Length(), &read);
  NS_ENSURE_SUCCESS(rv, rv);
  NS_ASSERTION(read == hash.Length(), "Could not read hash bytes");

  if (!hash.Equals(compareHash)) {
    NS_WARNING("SafeBrowsing file failed checksum.");
    return NS_ERROR_FAILURE;
  }

  return NS_OK;
}
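
// Open the on-disk store if it exists: wrap it in a buffered input stream,
// then read and sanity-check the header. A missing file is not an error;
// we simply start out with an empty, freshly initialized header.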
nsresult
HashStore::Open()
{
  nsCOMPtr<nsIFile> storeFile;
  nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(".sbstore"));
  NS_ENSURE_SUCCESS(rv, rv);

  nsCOMPtr<nsIInputStream> origStream;
  rv = NS_NewLocalFileInputStream(getter_AddRefs(origStream), storeFile,
                                  PR_RDONLY | nsIFile::OS_READAHEAD);

  if (rv == NS_ERROR_FILE_NOT_FOUND) {
    UpdateHeader();
    return NS_OK;
  } else {
    SUCCESS_OR_RESET(rv);
  }

  int64_t fileSize;
  rv = storeFile->GetFileSize(&fileSize);
  NS_ENSURE_SUCCESS(rv, rv);

  if (fileSize < 0 || fileSize > UINT32_MAX) {
    return NS_ERROR_FAILURE;
  }

  mFileSize = static_cast<uint32_t>(fileSize);
  mInputStream = NS_BufferInputStream(origStream, mFileSize);

  rv = ReadHeader();
  SUCCESS_OR_RESET(rv);

  rv = SanityCheck();
  SUCCESS_OR_RESET(rv);

  return NS_OK;
}

nsresult
HashStore::ReadHeader()
{
  if (!mInputStream) {
    UpdateHeader();
    return NS_OK;
  }

  nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
  nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, 0);
  NS_ENSURE_SUCCESS(rv, rv);

  void *buffer = &mHeader;
  rv = NS_ReadInputStreamToBuffer(mInputStream,
                                  &buffer,
                                  sizeof(Header));
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

nsresult
HashStore::SanityCheck()
{
  if (mHeader.magic != STORE_MAGIC || mHeader.version != CURRENT_VERSION) {
    NS_WARNING("Unexpected header data in the store.");
    return NS_ERROR_FAILURE;
  }

  return NS_OK;
}
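
// Compute the MD5 checksum of the store's contents. When aChecksumPresent
// is true, the trailing CHECKSUM_SIZE bytes are excluded so the result can
// be compared against the checksum stored at the end of the file.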
nsresult
HashStore::CalculateChecksum(nsAutoCString& aChecksum,
                             uint32_t aFileSize,
                             bool aChecksumPresent)
{
  aChecksum.Truncate();

  // Reset mInputStream to start
  nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
  nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, 0);

  nsCOMPtr<nsICryptoHash> hash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID, &rv);
  NS_ENSURE_SUCCESS(rv, rv);

  // Size of MD5 hash in bytes
  const uint32_t CHECKSUM_SIZE = 16;

  // MD5 is not a secure hash function, but since this is a filesystem
  // integrity check, this usage is ok.
  rv = hash->Init(nsICryptoHash::MD5);
  NS_ENSURE_SUCCESS(rv, rv);

  if (!aChecksumPresent) {
    // Hash entire file
    rv = hash->UpdateFromStream(mInputStream, UINT32_MAX);
  } else {
    // Hash everything but last checksum bytes
    if (aFileSize < CHECKSUM_SIZE) {
      NS_WARNING("SafeBrowsing file isn't long enough to store its checksum");
      return NS_ERROR_FAILURE;
    }
    rv = hash->UpdateFromStream(mInputStream, aFileSize - CHECKSUM_SIZE);
  }
  NS_ENSURE_SUCCESS(rv, rv);

  rv = hash->Finish(false, aChecksum);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

void
HashStore::UpdateHeader()
{
  mHeader.magic = STORE_MAGIC;
  mHeader.version = CURRENT_VERSION;

  mHeader.numAddChunks = mAddChunks.Length();
  mHeader.numSubChunks = mSubChunks.Length();
  mHeader.numAddPrefixes = mAddPrefixes.Length();
  mHeader.numSubPrefixes = mSubPrefixes.Length();
  mHeader.numAddCompletes = mAddCompletes.Length();
  mHeader.numSubCompletes = mSubCompletes.Length();
}
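
// Read the add/sub chunk number lists that immediately follow the header.
// This is a no-op if the store is empty or the chunk numbers have already
// been read.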
nsresult
HashStore::ReadChunkNumbers()
{
  if (!mInputStream || AlreadyReadChunkNumbers()) {
    return NS_OK;
  }

  nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
  nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET,
                               sizeof(Header));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mAddChunks.Read(mInputStream, mHeader.numAddChunks);
  NS_ENSURE_SUCCESS(rv, rv);
  NS_ASSERTION(mAddChunks.Length() == mHeader.numAddChunks,
               "Read the right amount of add chunks.");

  rv = mSubChunks.Read(mInputStream, mHeader.numSubChunks);
  NS_ENSURE_SUCCESS(rv, rv);
  NS_ASSERTION(mSubChunks.Length() == mHeader.numSubChunks,
               "Read the right amount of sub chunks.");

  return NS_OK;
}

nsresult
HashStore::ReadHashes()
{
  if (!mInputStream) {
    // BeginUpdate has been called but Open hasn't initialized mInputStream,
    // because the existing HashStore is empty.
    return NS_OK;
  }

  nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);

  uint32_t offset = sizeof(Header);
  offset += (mHeader.numAddChunks + mHeader.numSubChunks) * sizeof(uint32_t);
  nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, offset);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = ReadAddPrefixes();
  NS_ENSURE_SUCCESS(rv, rv);

  rv = ReadSubPrefixes();
  NS_ENSURE_SUCCESS(rv, rv);

  // If the completions were already read, we are done here.
  if (AlreadyReadCompletions()) {
    return NS_OK;
  }

  rv = ReadTArray(mInputStream, &mAddCompletes, mHeader.numAddCompletes);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = ReadTArray(mInputStream, &mSubCompletes, mHeader.numSubCompletes);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

nsresult
HashStore::ReadCompletions()
{
  if (!mInputStream || AlreadyReadCompletions()) {
    return NS_OK;
  }

  nsCOMPtr<nsIFile> storeFile;
  nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(STORE_SUFFIX));
  NS_ENSURE_SUCCESS(rv, rv);

  uint32_t offset = mFileSize -
                    sizeof(struct AddComplete) * mHeader.numAddCompletes -
                    sizeof(struct SubComplete) * mHeader.numSubCompletes -
                    nsCheckSummedOutputStream::CHECKSUM_SIZE;

  nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream);
  rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, offset);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = ReadTArray(mInputStream, &mAddCompletes, mHeader.numAddCompletes);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = ReadTArray(mInputStream, &mSubCompletes, mHeader.numSubCompletes);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}
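
// Verify the file checksum and pull the remaining store contents (chunk
// numbers, prefixes and completions) into memory so an update can be
// applied. Any failure other than OOM resets the store.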
nsresult
HashStore::PrepareForUpdate()
{
  nsresult rv = CheckChecksum(mFileSize);
  SUCCESS_OR_RESET(rv);

  rv = ReadChunkNumbers();
  SUCCESS_OR_RESET(rv);

  rv = ReadHashes();
  SUCCESS_OR_RESET(rv);

  return NS_OK;
}

nsresult
HashStore::BeginUpdate()
{
  // Check whether the file is corrupted and read the rest of the store
  // into memory.
  nsresult rv = PrepareForUpdate();
  NS_ENSURE_SUCCESS(rv, rv);

  // Close the input stream; it won't be needed any more because
  // we will rewrite ourselves.
  if (mInputStream) {
    rv = mInputStream->Close();
    NS_ENSURE_SUCCESS(rv, rv);
  }

  mInUpdate = true;

  return NS_OK;
}
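
// Merge the sorted update entries into the sorted store array, skipping
// entries whose chunk the store already has (unless aAllowMerging), and
// merge the update's chunk numbers into the store's chunk set.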
template<class T>
static nsresult
Merge(ChunkSet* aStoreChunks,
      FallibleTArray<T>* aStorePrefixes,
      ChunkSet& aUpdateChunks,
      FallibleTArray<T>& aUpdatePrefixes,
      bool aAllowMerging = false)
{
  EntrySort(aUpdatePrefixes);

  T* updateIter = aUpdatePrefixes.Elements();
  T* updateEnd = aUpdatePrefixes.Elements() + aUpdatePrefixes.Length();

  T* storeIter = aStorePrefixes->Elements();
  T* storeEnd = aStorePrefixes->Elements() + aStorePrefixes->Length();

  // Use a separate array so we can keep the iterators valid
  // if the nsTArray grows.
  nsTArray<T> adds;

  for (; updateIter != updateEnd; updateIter++) {
    // Skip this chunk if we already have it, unless we're
    // merging completions, in which case we'll always already
    // have the chunk from the original prefix.
    if (aStoreChunks->Has(updateIter->Chunk()))
      if (!aAllowMerging)
        continue;
    // XXX: binary search for insertion point might be faster in common
    // case?
    while (storeIter < storeEnd && (storeIter->Compare(*updateIter) < 0)) {
      // Skip forward to the matching element (or the insertion point).
      storeIter++;
    }
    // No match; add it.
    if (storeIter == storeEnd
        || storeIter->Compare(*updateIter) != 0) {
      if (!adds.AppendElement(*updateIter))
        return NS_ERROR_OUT_OF_MEMORY;
    }
  }

  // Chunks can be empty, but we should still report that we have them
  // to keep the chunk ranges continuous.
  aStoreChunks->Merge(aUpdateChunks);

  if (!aStorePrefixes->AppendElements(adds, fallible))
    return NS_ERROR_OUT_OF_MEMORY;

  EntrySort(*aStorePrefixes);

  return NS_OK;
}

nsresult
HashStore::ApplyUpdate(TableUpdate &aUpdate)
{
  auto updateV2 = TableUpdate::Cast<TableUpdateV2>(&aUpdate);
  NS_ENSURE_TRUE(updateV2, NS_ERROR_FAILURE);
  TableUpdateV2& update = *updateV2;

  nsresult rv = mAddExpirations.Merge(update.AddExpirations());
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mSubExpirations.Merge(update.SubExpirations());
  NS_ENSURE_SUCCESS(rv, rv);

  rv = Expire();
  NS_ENSURE_SUCCESS(rv, rv);

  rv = Merge(&mAddChunks, &mAddPrefixes,
             update.AddChunks(), update.AddPrefixes());
  NS_ENSURE_SUCCESS(rv, rv);

  rv = Merge(&mAddChunks, &mAddCompletes,
             update.AddChunks(), update.AddCompletes(), true);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = Merge(&mSubChunks, &mSubPrefixes,
             update.SubChunks(), update.SubPrefixes());
  NS_ENSURE_SUCCESS(rv, rv);

  rv = Merge(&mSubChunks, &mSubCompletes,
             update.SubChunks(), update.SubCompletes(), true);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

nsresult
HashStore::Rebuild()
{
  NS_ASSERTION(mInUpdate, "Must be in update to rebuild.");

  nsresult rv = ProcessSubs();
  NS_ENSURE_SUCCESS(rv, rv);

  UpdateHeader();

  return NS_OK;
}

void
HashStore::ClearCompletes()
{
  NS_ASSERTION(mInUpdate, "Must be in update to clear completes.");

  mAddCompletes.Clear();
  mSubCompletes.Clear();

  UpdateHeader();
}
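
// Compact aEntries in place, keeping only the entries whose chunk is not
// listed in aExpirations.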
template<class T>
static void
ExpireEntries(FallibleTArray<T>* aEntries, ChunkSet& aExpirations)
{
  T* addIter = aEntries->Elements();
  T* end = aEntries->Elements() + aEntries->Length();

  for (T *iter = addIter; iter != end; iter++) {
    if (!aExpirations.Has(iter->Chunk())) {
      *addIter = *iter;
      addIter++;
    }
  }

  aEntries->TruncateLength(addIter - aEntries->Elements());
}

nsresult
HashStore::Expire()
{
  ExpireEntries(&mAddPrefixes, mAddExpirations);
  ExpireEntries(&mAddCompletes, mAddExpirations);
  ExpireEntries(&mSubPrefixes, mSubExpirations);
  ExpireEntries(&mSubCompletes, mSubExpirations);

  mAddChunks.Remove(mAddExpirations);
  mSubChunks.Remove(mSubExpirations);

  mAddExpirations.Clear();
  mSubExpirations.Clear();

  return NS_OK;
}
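
// Serialize aIn as a uint32_t byte count followed by that many bytes of
// zlib DEFLATE data; InflateReadTArray below reads the same layout back.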
template<class T>
nsresult DeflateWriteTArray(nsIOutputStream* aStream, nsTArray<T>& aIn)
{
  uLongf insize = aIn.Length() * sizeof(T);
  uLongf outsize = compressBound(insize);
  FallibleTArray<char> outBuff;
  if (!outBuff.SetLength(outsize, fallible)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  int zerr = compress(reinterpret_cast<Bytef*>(outBuff.Elements()),
                      &outsize,
                      reinterpret_cast<const Bytef*>(aIn.Elements()),
                      insize);
  if (zerr != Z_OK) {
    return NS_ERROR_FAILURE;
  }
  LOG(("DeflateWriteTArray: %d in %d out", insize, outsize));

  outBuff.TruncateLength(outsize);

  // Length of compressed data stream
  uint32_t dataLen = outBuff.Length();
  uint32_t written;
  nsresult rv = aStream->Write(reinterpret_cast<char*>(&dataLen), sizeof(dataLen), &written);
  NS_ENSURE_SUCCESS(rv, rv);

  NS_ASSERTION(written == sizeof(dataLen), "Error writing deflate length");

  // Store to stream
  rv = WriteTArray(aStream, outBuff);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

template<class T>
nsresult InflateReadTArray(nsIInputStream* aStream, FallibleTArray<T>* aOut,
                           uint32_t aExpectedSize)
{
  uint32_t inLen;
  uint32_t read;
  nsresult rv = aStream->Read(reinterpret_cast<char*>(&inLen), sizeof(inLen), &read);
  NS_ENSURE_SUCCESS(rv, rv);

  NS_ASSERTION(read == sizeof(inLen), "Error reading inflate length");

  FallibleTArray<char> inBuff;
  if (!inBuff.SetLength(inLen, fallible)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  rv = ReadTArray(aStream, &inBuff, inLen);
  NS_ENSURE_SUCCESS(rv, rv);

  uLongf insize = inLen;
  uLongf outsize = aExpectedSize * sizeof(T);
  if (!aOut->SetLength(aExpectedSize, fallible)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  int zerr = uncompress(reinterpret_cast<Bytef*>(aOut->Elements()),
                        &outsize,
                        reinterpret_cast<const Bytef*>(inBuff.Elements()),
                        insize);
  if (zerr != Z_OK) {
    return NS_ERROR_FAILURE;
  }
  LOG(("InflateReadTArray: %d in %d out", insize, outsize));

  NS_ASSERTION(outsize == aExpectedSize * sizeof(T), "Decompression size mismatch");

  return NS_OK;
}
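
// Write aData as four byte slices, most significant byte first: the first
// three slices DEFLATE-compressed, the last (LSB) slice stored raw, as
// described in the byte slicing notes at the top of this file.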
static nsresult
ByteSliceWrite(nsIOutputStream* aOut, nsTArray<uint32_t>& aData)
{
  nsTArray<uint8_t> slice;
  uint32_t count = aData.Length();

  // Only process one slice at a time to avoid using too much memory.
  if (!slice.SetLength(count, fallible)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Process slice 1.
  for (uint32_t i = 0; i < count; i++) {
    slice[i] = (aData[i] >> 24);
  }

  nsresult rv = DeflateWriteTArray(aOut, slice);
  NS_ENSURE_SUCCESS(rv, rv);

  // Process slice 2.
  for (uint32_t i = 0; i < count; i++) {
    slice[i] = ((aData[i] >> 16) & 0xFF);
  }

  rv = DeflateWriteTArray(aOut, slice);
  NS_ENSURE_SUCCESS(rv, rv);

  // Process slice 3.
  for (uint32_t i = 0; i < count; i++) {
    slice[i] = ((aData[i] >> 8) & 0xFF);
  }

  rv = DeflateWriteTArray(aOut, slice);
  NS_ENSURE_SUCCESS(rv, rv);

  // Process slice 4.
  for (uint32_t i = 0; i < count; i++) {
    slice[i] = (aData[i] & 0xFF);
  }

  // The LSB slice is generally uncompressible; don't bother
  // compressing it.
  rv = WriteTArray(aOut, slice);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

static nsresult
ByteSliceRead(nsIInputStream* aInStream, FallibleTArray<uint32_t>* aData, uint32_t count)
{
  FallibleTArray<uint8_t> slice1;
  FallibleTArray<uint8_t> slice2;
  FallibleTArray<uint8_t> slice3;
  FallibleTArray<uint8_t> slice4;

  nsresult rv = InflateReadTArray(aInStream, &slice1, count);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = InflateReadTArray(aInStream, &slice2, count);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = InflateReadTArray(aInStream, &slice3, count);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = ReadTArray(aInStream, &slice4, count);
  NS_ENSURE_SUCCESS(rv, rv);

  if (!aData->SetCapacity(count, fallible)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  for (uint32_t i = 0; i < count; i++) {
    aData->AppendElement((slice1[i] << 24) |
                         (slice2[i] << 16) |
                         (slice3[i] << 8) |
                         (slice4[i]),
                         fallible);
  }

  return NS_OK;
}
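
// Read the add chunk numbers of the AddPrefixes. The prefix values
// themselves are not stored in this file; they are filled in later from the
// PrefixSet cache via AugmentAdds().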
nsresult
HashStore::ReadAddPrefixes()
{
  FallibleTArray<uint32_t> chunks;
  uint32_t count = mHeader.numAddPrefixes;

  nsresult rv = ByteSliceRead(mInputStream, &chunks, count);
  NS_ENSURE_SUCCESS(rv, rv);

  if (!mAddPrefixes.SetCapacity(count, fallible)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  for (uint32_t i = 0; i < count; i++) {
    AddPrefix *add = mAddPrefixes.AppendElement(fallible);
    add->prefix.FromUint32(0);
    add->addChunk = chunks[i];
  }

  return NS_OK;
}

nsresult
HashStore::ReadSubPrefixes()
{
  FallibleTArray<uint32_t> addchunks;
  FallibleTArray<uint32_t> subchunks;
  FallibleTArray<uint32_t> prefixes;
  uint32_t count = mHeader.numSubPrefixes;

  nsresult rv = ByteSliceRead(mInputStream, &addchunks, count);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = ByteSliceRead(mInputStream, &subchunks, count);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = ByteSliceRead(mInputStream, &prefixes, count);
  NS_ENSURE_SUCCESS(rv, rv);

  if (!mSubPrefixes.SetCapacity(count, fallible)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  for (uint32_t i = 0; i < count; i++) {
    SubPrefix *sub = mSubPrefixes.AppendElement(fallible);
    sub->addChunk = addchunks[i];
    sub->prefix.FromUint32(prefixes[i]);
    sub->subChunk = subchunks[i];
  }

  return NS_OK;
}

// Split up the PrefixArray back into its constituents.
nsresult
HashStore::WriteAddPrefixes(nsIOutputStream* aOut)
{
  nsTArray<uint32_t> chunks;
  uint32_t count = mAddPrefixes.Length();
  if (!chunks.SetCapacity(count, fallible)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  for (uint32_t i = 0; i < count; i++) {
    chunks.AppendElement(mAddPrefixes[i].Chunk());
  }

  nsresult rv = ByteSliceWrite(aOut, chunks);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

nsresult
HashStore::WriteSubPrefixes(nsIOutputStream* aOut)
{
  nsTArray<uint32_t> addchunks;
  nsTArray<uint32_t> subchunks;
  nsTArray<uint32_t> prefixes;
  uint32_t count = mSubPrefixes.Length();
  addchunks.SetCapacity(count);
  subchunks.SetCapacity(count);
  prefixes.SetCapacity(count);

  for (uint32_t i = 0; i < count; i++) {
    addchunks.AppendElement(mSubPrefixes[i].AddChunk());
    prefixes.AppendElement(mSubPrefixes[i].PrefixHash().ToUint32());
    subchunks.AppendElement(mSubPrefixes[i].Chunk());
  }

  nsresult rv = ByteSliceWrite(aOut, addchunks);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = ByteSliceWrite(aOut, subchunks);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = ByteSliceWrite(aOut, prefixes);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}
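
// Serialize the entire in-memory store back to <table>.sbstore. The data
// goes through a check-summed safe output stream, so the trailing MD5 is
// written for us and the old file is only replaced once Finish() succeeds.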
nsresult
HashStore::WriteFile()
{
  NS_ASSERTION(mInUpdate, "Must be in update to write database.");
  if (nsUrlClassifierDBService::ShutdownHasStarted()) {
    return NS_ERROR_ABORT;
  }

  nsCOMPtr<nsIFile> storeFile;
  nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile));
  NS_ENSURE_SUCCESS(rv, rv);
  rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(".sbstore"));
  NS_ENSURE_SUCCESS(rv, rv);

  nsCOMPtr<nsIOutputStream> out;
  rv = NS_NewCheckSummedOutputStream(getter_AddRefs(out), storeFile);
  NS_ENSURE_SUCCESS(rv, rv);

  uint32_t written;
  rv = out->Write(reinterpret_cast<char*>(&mHeader), sizeof(mHeader), &written);
  NS_ENSURE_SUCCESS(rv, rv);

  // Write chunk numbers.
  rv = mAddChunks.Write(out);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mSubChunks.Write(out);
  NS_ENSURE_SUCCESS(rv, rv);

  // Write hashes.
  rv = WriteAddPrefixes(out);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = WriteSubPrefixes(out);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = WriteTArray(out, mAddCompletes);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = WriteTArray(out, mSubCompletes);
  NS_ENSURE_SUCCESS(rv, rv);

  nsCOMPtr<nsISafeOutputStream> safeOut = do_QueryInterface(out, &rv);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = safeOut->Finish();
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

template <class T>
static void
Erase(FallibleTArray<T>* array, T* iterStart, T* iterEnd)
{
  uint32_t start = iterStart - array->Elements();
  uint32_t count = iterEnd - iterStart;

  if (count > 0) {
    array->RemoveElementsAt(start, count);
  }
}

// Find items matching between |aSubs| and |aAdds|, and remove them from
// both arrays. To minimize copies, the inputs are processed in parallel,
// so they must be compatibly ordered (both sorted by the same Compare()).
template<class TSub, class TAdd>
static void
KnockoutSubs(FallibleTArray<TSub>* aSubs, FallibleTArray<TAdd>* aAdds)
{
  // Keep a pair of output iterators for writing kept items. Due to
  // deletions, these may lag the main iterators. Using erase() on
  // individual items would result in O(N^2) copies. Using a list
  // would work around that, at double or triple the memory cost.
  TAdd* addOut = aAdds->Elements();
  TAdd* addIter = aAdds->Elements();

  TSub* subOut = aSubs->Elements();
  TSub* subIter = aSubs->Elements();

  TAdd* addEnd = addIter + aAdds->Length();
  TSub* subEnd = subIter + aSubs->Length();

  while (addIter != addEnd && subIter != subEnd) {
    // This is addIter's Compare, so it compares on the add chunk.
    int32_t cmp = addIter->Compare(*subIter);
    if (cmp > 0) {
      // If |*subIter| < |*addIter|, retain the sub.
      *subOut = *subIter;
      ++subOut;
      ++subIter;
    } else if (cmp < 0) {
      // If |*addIter| < |*subIter|, retain the add.
      *addOut = *addIter;
      ++addOut;
      ++addIter;
    } else {
      // Drop equal items.
      ++addIter;
      ++subIter;
    }
  }

  Erase(aAdds, addOut, addIter);
  Erase(aSubs, subOut, subIter);
}

// Remove items in |aSubs| from |aFullHashes|. |aFullHashes| and
// |aSubs| should be ordered by their add prefix component.
template <class T>
static void
RemoveMatchingPrefixes(const SubPrefixArray& aSubs, FallibleTArray<T>* aFullHashes)
{
  // Where to store kept items.
  T* out = aFullHashes->Elements();
  T* hashIter = out;
  T* hashEnd = aFullHashes->Elements() + aFullHashes->Length();

  SubPrefix const * removeIter = aSubs.Elements();
  SubPrefix const * removeEnd = aSubs.Elements() + aSubs.Length();

  while (hashIter != hashEnd && removeIter != removeEnd) {
    int32_t cmp = removeIter->CompareAlt(*hashIter);
    if (cmp > 0) {
      // Keep items less than |*removeIter|.
      *out = *hashIter;
      ++out;
      ++hashIter;
    } else if (cmp < 0) {
      // No hit for |*removeIter|, bump it forward.
      ++removeIter;
    } else {
      // Drop equal items; there may be multiple hits.
      do {
        ++hashIter;
      } while (hashIter != hashEnd &&
               !(removeIter->CompareAlt(*hashIter) < 0));
      ++removeIter;
    }
  }
  Erase(aFullHashes, out, hashIter);
}

static void
RemoveDeadSubPrefixes(SubPrefixArray& aSubs, ChunkSet& aAddChunks)
{
  SubPrefix * subIter = aSubs.Elements();
  SubPrefix * subEnd = aSubs.Elements() + aSubs.Length();

  for (SubPrefix * iter = subIter; iter != subEnd; iter++) {
    bool hasChunk = aAddChunks.Has(iter->AddChunk());
    // Keep the subprefix if the chunk it refers to is one
    // we haven't seen yet.
    if (!hasChunk) {
      *subIter = *iter;
      subIter++;
    }
  }

  LOG(("Removed %u dead SubPrefix entries.", subEnd - subIter));

  aSubs.TruncateLength(subIter - aSubs.Elements());
}

#ifdef DEBUG
template <class T>
static void EnsureSorted(FallibleTArray<T>* aArray)
{
  T* start = aArray->Elements();
  T* end = aArray->Elements() + aArray->Length();
  T* iter = start;
  T* previous = start;

  while (iter != end) {
    previous = iter;
    ++iter;
    if (iter != end) {
      MOZ_ASSERT(iter->Compare(*previous) >= 0);
    }
  }

  return;
}
#endif
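
// Apply the subs to the in-memory data: drop completes whose prefix has
// been subbed, cancel out matching add/sub pairs, and finally discard sub
// prefixes whose add chunk is already present (those were handled above).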
nsresult
HashStore::ProcessSubs()
{
#ifdef DEBUG
  EnsureSorted(&mAddPrefixes);
  EnsureSorted(&mSubPrefixes);
  EnsureSorted(&mAddCompletes);
  EnsureSorted(&mSubCompletes);
  LOG(("All databases seem to have a consistent sort order."));
#endif

  RemoveMatchingPrefixes(mSubPrefixes, &mAddCompletes);
  RemoveMatchingPrefixes(mSubPrefixes, &mSubCompletes);

  // Remove any remaining subbed prefixes from both addprefixes
  // and addcompletes.
  KnockoutSubs(&mSubPrefixes, &mAddPrefixes);
  KnockoutSubs(&mSubCompletes, &mAddCompletes);

  // Remove any remaining subprefixes referring to addchunks that
  // we have (and hence have been processed above).
  RemoveDeadSubPrefixes(mSubPrefixes, mAddChunks);

#ifdef DEBUG
  EnsureSorted(&mAddPrefixes);
  EnsureSorted(&mSubPrefixes);
  EnsureSorted(&mAddCompletes);
  EnsureSorted(&mSubCompletes);
  LOG(("All databases seem to have a consistent sort order."));
#endif

  return NS_OK;
}
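
// Re-attach the actual prefix values, retrieved from the PrefixSet cache,
// to the add chunk numbers kept in this store (see the notes at the top of
// this file); the two arrays must line up one-to-one.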
nsresult
HashStore::AugmentAdds(const nsTArray<uint32_t>& aPrefixes)
{
  uint32_t cnt = aPrefixes.Length();
  if (cnt != mAddPrefixes.Length()) {
    LOG(("Number of prefixes in cache not consistent with store (%d vs %d)",
         aPrefixes.Length(), mAddPrefixes.Length()));
    return NS_ERROR_FAILURE;
  }
  for (uint32_t i = 0; i < cnt; i++) {
    mAddPrefixes[i].prefix.FromUint32(aPrefixes[i]);
  }
  return NS_OK;
}

ChunkSet&
HashStore::AddChunks()
{
  ReadChunkNumbers();
  return mAddChunks;
}

ChunkSet&
HashStore::SubChunks()
{
  ReadChunkNumbers();
  return mSubChunks;
}

AddCompleteArray&
HashStore::AddCompletes()
{
  ReadCompletions();
  return mAddCompletes;
}

SubCompleteArray&
HashStore::SubCompletes()
{
  ReadCompletions();
  return mSubCompletes;
}

bool
HashStore::AlreadyReadChunkNumbers()
{
  // If the header says there are chunks but the chunk sets are still empty,
  // we haven't read the chunk numbers yet.
  if ((mHeader.numAddChunks != 0 && mAddChunks.Length() == 0) ||
      (mHeader.numSubChunks != 0 && mSubChunks.Length() == 0)) {
    return false;
  }
  return true;
}

bool
HashStore::AlreadyReadCompletions()
{
  // If the header says there are completions but the completion arrays are
  // still empty, we haven't read the completions yet.
  if ((mHeader.numAddCompletes != 0 && mAddCompletes.Length() == 0) ||
      (mHeader.numSubCompletes != 0 && mSubCompletes.Length() == 0)) {
    return false;
  }
  return true;
}

} // namespace safebrowsing
} // namespace mozilla