/* Any copyright is dedicated to the Public Domain.
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

// This file tests that SQLITE_FCNTL_CHUNK_SIZE behaves as expected.

function run_sql(d, sql) {
  var stmt = d.createStatement(sql);
  stmt.execute();
  stmt.finalize();
}

function new_file(name)
{
  var file = dirSvc.get("ProfD", Ci.nsIFile);
  file.append(name);
  return file;
}

function get_size(name) {
  return new_file(name).fileSize;
}

function run_test()
{
  const filename = "chunked.sqlite";
  const CHUNK_SIZE = 512 * 1024;
  var d = getDatabase(new_file(filename));
  try {
    // setGrowthIncrement is backed by SQLITE_FCNTL_CHUNK_SIZE.
    d.setGrowthIncrement(CHUNK_SIZE, "");
  } catch (e if e.result == Cr.NS_ERROR_FILE_TOO_BIG) {
    print("Too little free space to set CHUNK_SIZE!");
    return;
  }
  run_sql(d, "CREATE TABLE bloat(data varchar)");

  var orig_size = get_size(filename);
  /* Dump in at least 32K worth of data.
   * While writing, ensure that the file grows in increments of the
   * chunk size set above.
   */
  const str1024 = new Array(1024).join("T");
  for (var i = 0; i < 32; i++) {
    run_sql(d, "INSERT INTO bloat VALUES('" + str1024 + "')");
    var size = get_size(filename);
    // Must not grow in small increments.
    do_check_true(size == orig_size || size >= CHUNK_SIZE);
  }
  /* In addition to growing in chunk-size increments, the db
   * should shrink in chunk-size increments too, so after a VACUUM
   * it must still be at least one chunk in size.
   */
  run_sql(d, "DELETE FROM bloat");
  run_sql(d, "VACUUM");
  do_check_true(get_size(filename) >= CHUNK_SIZE);
}