/* Any copyright is dedicated to the Public Domain.
 * http://creativecommons.org/publicdomain/zero/1.0/ */

Cu.import("resource://services-sync/constants.js");
Cu.import("resource://services-sync/engines.js");
Cu.import("resource://services-sync/policies.js");
Cu.import("resource://services-sync/record.js");
Cu.import("resource://services-sync/resource.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/rotaryengine.js");
Cu.import("resource://testing-common/services/sync/utils.js");

function makeRotaryEngine() {
  return new RotaryEngine(Service);
}

function cleanAndGo(server) {
  Svc.Prefs.resetBranch("");
  Svc.Prefs.set("log.logger.engine.rotary", "Trace");
  Service.recordManager.clearCache();
  server.stop(run_next_test);
}

function configureService(server, username, password) {
  Service.clusterURL = server.baseURI;

  Service.identity.account = username || "foo";
  Service.identity.basicPassword = password || "password";
}

function createServerAndConfigureClient() {
  let engine = new RotaryEngine(Service);

  let contents = {
    meta: {global: {engines: {rotary: {version: engine.version,
                                       syncID: engine.syncID}}}},
    crypto: {},
    rotary: {}
  };

  const USER = "foo";
  let server = new SyncServer();
  server.registerUser(USER, "password");
  server.createContents(USER, contents);
  server.start();

  Service.serverURL = server.baseURI;
  Service.clusterURL = server.baseURI;
  Service.identity.username = USER;
  Service._updateCachedURLs();

  return [engine, server, USER];
}

function run_test() {
  generateNewKeys(Service.collectionKeys);
  Svc.Prefs.set("log.logger.engine.rotary", "Trace");
  run_next_test();
}

/*
 * Tests
 *
 * SyncEngine._sync() is divided into four rather independent steps:
 *
 * - _syncStartup()
 * - _processIncoming()
 * - _uploadOutgoing()
 * - _syncFinish()
 *
 * In the spirit of unit testing, these are tested individually for
 * different scenarios below.
 */
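
/*
 * For orientation, a minimal sketch of how a full sync composes these steps,
 * based on the calls the tests in this file make (not an exhaustive
 * description of SyncEngine.sync()):
 *
 *   engine._syncStartup();     // validate meta/global, reset sync if needed
 *   engine._processIncoming(); // download and reconcile server records
 *   engine._uploadOutgoing();  // upload locally changed records
 *   engine._syncFinish();      // process queued deletions, reset tracker score
 */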

add_test(function test_syncStartup_emptyOrOutdatedGlobalsResetsSync() {
  _("SyncEngine._syncStartup resets sync and wipes server data if there's no or an outdated global record");

  // Some server side data that's going to be wiped
  let collection = new ServerCollection();
  collection.insert('flying',
                    encryptPayload({id: 'flying',
                                    denomination: "LNER Class A3 4472"}));
  collection.insert('scotsman',
                    encryptPayload({id: 'scotsman',
                                    denomination: "Flying Scotsman"}));

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  let engine = makeRotaryEngine();
  engine._store.items = {rekolok: "Rekonstruktionslokomotive"};
  try {

    // Confirm initial environment
    do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);
    let metaGlobal = Service.recordManager.get(engine.metaURL);
    do_check_eq(metaGlobal.payload.engines, undefined);
    do_check_true(!!collection.payload("flying"));
    do_check_true(!!collection.payload("scotsman"));

    engine.lastSync = Date.now() / 1000;
    engine.lastSyncLocal = Date.now();

    // Trying to prompt a wipe -- we no longer track CryptoMeta per engine,
    // so it has nothing to check.
    engine._syncStartup();

    // The meta/global WBO has been filled with data about the engine
    let engineData = metaGlobal.payload.engines["rotary"];
    do_check_eq(engineData.version, engine.version);
    do_check_eq(engineData.syncID, engine.syncID);

    // Sync was reset and server data was wiped
    do_check_eq(engine.lastSync, 0);
    do_check_eq(collection.payload("flying"), undefined);
    do_check_eq(collection.payload("scotsman"), undefined);

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_syncStartup_serverHasNewerVersion() {
  _("SyncEngine._syncStartup fails when the server reports a newer engine version");

  let global = new ServerWBO('global', {engines: {rotary: {version: 23456}}});
  let server = httpd_setup({
      "/1.1/foo/storage/meta/global": global.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  let engine = makeRotaryEngine();
  try {

    // The server has a newer version of the data than our engine can
    // handle. That should give us an exception.
    let error;
    try {
      engine._syncStartup();
    } catch (ex) {
      error = ex;
    }
    do_check_eq(error.failureCode, VERSION_OUT_OF_DATE);

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_syncStartup_syncIDMismatchResetsClient() {
  _("SyncEngine._syncStartup resets sync if syncIDs don't match");

  let server = sync_httpd_setup({});
  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  // global record with a different syncID than our engine has
  let engine = makeRotaryEngine();
  let global = new ServerWBO('global',
                             {engines: {rotary: {version: engine.version,
                                                 syncID: 'foobar'}}});
  server.registerPathHandler("/1.1/foo/storage/meta/global", global.handler());

  try {

    // Confirm initial environment
    do_check_eq(engine.syncID, 'fake-guid-0');
    do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);

    engine.lastSync = Date.now() / 1000;
    engine.lastSyncLocal = Date.now();
    engine._syncStartup();

    // The engine has assumed the server's syncID
    do_check_eq(engine.syncID, 'foobar');

    // Sync was reset
    do_check_eq(engine.lastSync, 0);

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_processIncoming_emptyServer() {
  _("SyncEngine._processIncoming working with an empty server backend");

  let collection = new ServerCollection();
  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  let engine = makeRotaryEngine();
  try {

    // Merely ensure that this code path is run without any errors
    engine._processIncoming();
    do_check_eq(engine.lastSync, 0);

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_processIncoming_createFromServer() {
  _("SyncEngine._processIncoming creates new records from server data");

  // Some server records that will be downloaded
  let collection = new ServerCollection();
  collection.insert('flying',
                    encryptPayload({id: 'flying',
                                    denomination: "LNER Class A3 4472"}));
  collection.insert('scotsman',
                    encryptPayload({id: 'scotsman',
                                    denomination: "Flying Scotsman"}));

  // A pathological case involving a relative URI gone wrong.
  let pathologicalPayload = encryptPayload({id: '../pathological',
                                            denomination: "Pathological Case"});
  collection.insert('../pathological', pathologicalPayload);

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler(),
      "/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
      "/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  generateNewKeys(Service.collectionKeys);

  let engine = makeRotaryEngine();
  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    // Confirm initial environment
    do_check_eq(engine.lastSync, 0);
    do_check_eq(engine.lastModified, null);
    do_check_eq(engine._store.items.flying, undefined);
    do_check_eq(engine._store.items.scotsman, undefined);
    do_check_eq(engine._store.items['../pathological'], undefined);

    engine._syncStartup();
    engine._processIncoming();

    // Timestamps of last sync and last server modification are set.
    do_check_true(engine.lastSync > 0);
    do_check_true(engine.lastModified > 0);

    // Local records have been created from the server data.
    do_check_eq(engine._store.items.flying, "LNER Class A3 4472");
    do_check_eq(engine._store.items.scotsman, "Flying Scotsman");
    do_check_eq(engine._store.items['../pathological'], "Pathological Case");

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_processIncoming_reconcile() {
  _("SyncEngine._processIncoming updates local records");

  let collection = new ServerCollection();

  // This server record doesn't exist on the client yet, so a new local
  // record will be created for it.
  collection.insert('newrecord',
                    encryptPayload({id: 'newrecord',
                                    denomination: "New stuff..."}));

  // This server record is newer than the corresponding client one,
  // so it'll update its data.
  collection.insert('newerserver',
                    encryptPayload({id: 'newerserver',
                                    denomination: "New data!"}));

  // This server record is 2 mins older than the client counterpart
  // but identical to it, so we're expecting the client record's
  // changedID to be reset.
  collection.insert('olderidentical',
                    encryptPayload({id: 'olderidentical',
                                    denomination: "Older but identical"}));
  collection._wbos.olderidentical.modified -= 120;

  // This item simply has different data than the corresponding client
  // record (which is unmodified), so it will update the client as well.
  collection.insert('updateclient',
                    encryptPayload({id: 'updateclient',
                                    denomination: "Get this!"}));

  // This is a dupe of 'original'.
  collection.insert('duplication',
                    encryptPayload({id: 'duplication',
                                    denomination: "Original Entry"}));

  // This record is marked as deleted, so we're expecting the client
  // record to be removed.
  collection.insert('nukeme',
                    encryptPayload({id: 'nukeme',
                                    denomination: "Nuke me!",
                                    deleted: true}));

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  let engine = makeRotaryEngine();
  engine._store.items = {newerserver: "New data, but not as new as server!",
                         olderidentical: "Older but identical",
                         updateclient: "Got data?",
                         original: "Original Entry",
                         long_original: "Long Original Entry",
                         nukeme: "Nuke me!"};
  // Make this record 1 min old, thus older than the one on the server
  engine._tracker.addChangedID('newerserver', Date.now()/1000 - 60);
  // This record has been changed 2 mins later than the one on the server
  engine._tracker.addChangedID('olderidentical', Date.now()/1000);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    // Confirm initial environment
    do_check_eq(engine._store.items.newrecord, undefined);
    do_check_eq(engine._store.items.newerserver, "New data, but not as new as server!");
    do_check_eq(engine._store.items.olderidentical, "Older but identical");
    do_check_eq(engine._store.items.updateclient, "Got data?");
    do_check_eq(engine._store.items.nukeme, "Nuke me!");
    do_check_true(engine._tracker.changedIDs['olderidentical'] > 0);

    engine._syncStartup();
    engine._processIncoming();

    // Timestamps of last sync and last server modification are set.
    do_check_true(engine.lastSync > 0);
    do_check_true(engine.lastModified > 0);

    // The new record is created.
    do_check_eq(engine._store.items.newrecord, "New stuff...");

    // The 'newerserver' record is updated since the server data is newer.
    do_check_eq(engine._store.items.newerserver, "New data!");

    // The data for 'olderidentical' is identical on the server, so
    // it's not marked as changed anymore.
    do_check_eq(engine._store.items.olderidentical, "Older but identical");
    do_check_eq(engine._tracker.changedIDs['olderidentical'], undefined);

    // Updated with server data.
    do_check_eq(engine._store.items.updateclient, "Get this!");

    // The incoming ID is preferred.
    do_check_eq(engine._store.items.original, undefined);
    do_check_eq(engine._store.items.duplication, "Original Entry");
    do_check_neq(engine._delete.ids.indexOf("original"), -1);

    // The 'nukeme' record marked as deleted is removed.
    do_check_eq(engine._store.items.nukeme, undefined);
  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_reconcile_local_deleted() {
  _("Ensure local, duplicate ID is deleted on server.");

  // When a duplicate is resolved, the local ID (which is never taken) should
  // be deleted on the server.
  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  record = encryptPayload({id: "DUPE_LOCAL", denomination: "local"});
  wbo = new ServerWBO("DUPE_LOCAL", record, now - 1);
  server.insertWBO(user, "rotary", wbo);

  engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
  do_check_true(engine._store.itemExists("DUPE_LOCAL"));
  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));

  engine._sync();

  do_check_attribute_count(engine._store.items, 1);
  do_check_true("DUPE_INCOMING" in engine._store.items);

  let collection = server.getCollection(user, "rotary");
  do_check_eq(1, collection.count());
  do_check_neq(undefined, collection.wbo("DUPE_INCOMING"));

  cleanAndGo(server);
});

add_test(function test_processIncoming_reconcile_equivalent() {
  _("Ensure proper handling of incoming records that match local.");

  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  let record = encryptPayload({id: "entry", denomination: "denomination"});
  let wbo = new ServerWBO("entry", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  engine._store.items = {entry: "denomination"};
  do_check_true(engine._store.itemExists("entry"));

  engine._sync();

  do_check_attribute_count(engine._store.items, 1);

  cleanAndGo(server);
});

add_test(function test_processIncoming_reconcile_locally_deleted_dupe_new() {
  _("Ensure locally deleted duplicate record newer than incoming is handled.");

  // This is a somewhat complicated test. It ensures that if a client receives
  // a modified record for an item that was deleted locally (under a different
  // ID), the incoming record is ignored. This is a corner case for record
  // handling, but it needs to be supported.
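  //
  // Timeline used below (derived from the values in this test, relative to
  // `now`): engine.lastSync = now, the incoming DUPE_INCOMING record is
  // modified at now + 2, and the local deletion of DUPE_LOCAL is tracked at
  // now + 3. The local tombstone is therefore newer and should win.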
  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  // Simulate a locally-deleted item.
  engine._store.items = {};
  engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
  do_check_false(engine._store.itemExists("DUPE_LOCAL"));
  do_check_false(engine._store.itemExists("DUPE_INCOMING"));
  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));

  engine._sync();

  // After the sync, the server's payload for the original ID should be marked
  // as deleted.
  do_check_empty(engine._store.items);
  let collection = server.getCollection(user, "rotary");
  do_check_eq(1, collection.count());
  wbo = collection.wbo("DUPE_INCOMING");
  do_check_neq(null, wbo);
  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
  do_check_true(payload.deleted);

  cleanAndGo(server);
});

add_test(function test_processIncoming_reconcile_locally_deleted_dupe_old() {
  _("Ensure locally deleted duplicate record older than incoming is restored.");

  // This is similar to the above test except it tests the condition where the
  // incoming record is newer than the local deletion, therefore overriding it.

  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  // Simulate a locally-deleted item.
  engine._store.items = {};
  engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
  do_check_false(engine._store.itemExists("DUPE_LOCAL"));
  do_check_false(engine._store.itemExists("DUPE_INCOMING"));
  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));

  engine._sync();

  // Since the remote change is newer, the incoming item should exist locally.
  do_check_attribute_count(engine._store.items, 1);
  do_check_true("DUPE_INCOMING" in engine._store.items);
  do_check_eq("incoming", engine._store.items.DUPE_INCOMING);

  let collection = server.getCollection(user, "rotary");
  do_check_eq(1, collection.count());
  wbo = collection.wbo("DUPE_INCOMING");
  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
  do_check_eq("incoming", payload.denomination);

  cleanAndGo(server);
});

add_test(function test_processIncoming_reconcile_changed_dupe() {
  _("Ensure that locally changed duplicate record is handled properly.");

  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  // The local record is newer than the incoming one, so it should be retained.
  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
  engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
  do_check_true(engine._store.itemExists("DUPE_LOCAL"));
  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));

  engine._sync();

  // The ID should have been changed to incoming.
  do_check_attribute_count(engine._store.items, 1);
  do_check_true("DUPE_INCOMING" in engine._store.items);

  // On the server, the local ID should be deleted and the incoming ID should
  // have its payload set to what was in the local record.
  let collection = server.getCollection(user, "rotary");
  do_check_eq(1, collection.count());
  wbo = collection.wbo("DUPE_INCOMING");
  do_check_neq(undefined, wbo);
  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
  do_check_eq("local", payload.denomination);

  cleanAndGo(server);
});

add_test(function test_processIncoming_reconcile_changed_dupe_new() {
  _("Ensure locally changed duplicate record older than incoming is ignored.");

  // This test is similar to the above except the incoming record is newer
  // than the local record. The incoming record should be authoritative.
  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
  engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
  do_check_true(engine._store.itemExists("DUPE_LOCAL"));
  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));

  engine._sync();

  // The ID should have been changed to incoming.
  do_check_attribute_count(engine._store.items, 1);
  do_check_true("DUPE_INCOMING" in engine._store.items);

  // On the server, the local ID should be deleted and the incoming ID should
  // have its payload retained.
  let collection = server.getCollection(user, "rotary");
  do_check_eq(1, collection.count());
  wbo = collection.wbo("DUPE_INCOMING");
  do_check_neq(undefined, wbo);
  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
  do_check_eq("incoming", payload.denomination);
  cleanAndGo(server);
});

add_test(function test_processIncoming_mobile_batchSize() {
  _("SyncEngine._processIncoming doesn't fetch everything at once on mobile clients");

  Svc.Prefs.set("client.type", "mobile");
  Service.identity.username = "foo";

  // A collection that logs each GET
  let collection = new ServerCollection();
  collection.get_log = [];
  collection._get = collection.get;
  collection.get = function (options) {
    this.get_log.push(options);
    return this._get(options);
  };

  // Let's create some 234 server side records. They're all at least
  // 10 minutes old.
  for (let i = 0; i < 234; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + i});
    let wbo = new ServerWBO(id, payload);
    wbo.modified = Date.now()/1000 - 60*(i+10);
    collection.insertWBO(wbo);
  }

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let engine = makeRotaryEngine();
  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    _("On a mobile client, we get new records from the server in batches of 50.");
    engine._syncStartup();
    engine._processIncoming();
    do_check_attribute_count(engine._store.items, 234);
    do_check_true('record-no-0' in engine._store.items);
    do_check_true('record-no-49' in engine._store.items);
    do_check_true('record-no-50' in engine._store.items);
    do_check_true('record-no-233' in engine._store.items);

    // Verify that the right number of GET requests with the right
    // kind of parameters were made.
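    // Assuming MOBILE_BATCH_SIZE is 50 (per the log message above), the
    // expected breakdown for 234 records is: one full fetch limited to 50
    // records, one GUID-only fetch for the remaining IDs, and then full
    // fetches of those IDs in batches (50 + 50 + 50 + 34), for a total of
    // ceil(234 / 50) + 1 requests.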
    do_check_eq(collection.get_log.length,
                Math.ceil(234 / MOBILE_BATCH_SIZE) + 1);
    do_check_eq(collection.get_log[0].full, 1);
    do_check_eq(collection.get_log[0].limit, MOBILE_BATCH_SIZE);
    do_check_eq(collection.get_log[1].full, undefined);
    do_check_eq(collection.get_log[1].limit, undefined);
    for (let i = 1; i <= Math.floor(234 / MOBILE_BATCH_SIZE); i++) {
      do_check_eq(collection.get_log[i+1].full, 1);
      do_check_eq(collection.get_log[i+1].limit, undefined);
      if (i < Math.floor(234 / MOBILE_BATCH_SIZE))
        do_check_eq(collection.get_log[i+1].ids.length, MOBILE_BATCH_SIZE);
      else
        do_check_eq(collection.get_log[i+1].ids.length, 234 % MOBILE_BATCH_SIZE);
    }

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_processIncoming_store_toFetch() {
  _("If processIncoming fails in the middle of a batch on mobile, state is saved in toFetch and lastSync.");
  Service.identity.username = "foo";
  Svc.Prefs.set("client.type", "mobile");

  // A collection that throws at the fourth get.
  let collection = new ServerCollection();
  collection._get_calls = 0;
  collection._get = collection.get;
  collection.get = function() {
    this._get_calls += 1;
    if (this._get_calls > 3) {
      throw "Abort on fourth call!";
    }
    return this._get.apply(this, arguments);
  };

  // Let's create three batches worth of server side records.
  for (var i = 0; i < MOBILE_BATCH_SIZE * 3; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + id});
    let wbo = new ServerWBO(id, payload);
    wbo.modified = Date.now()/1000 + 60 * (i - MOBILE_BATCH_SIZE * 3);
    collection.insertWBO(wbo);
  }

  let engine = makeRotaryEngine();
  engine.enabled = true;

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {

    // Confirm initial environment
    do_check_eq(engine.lastSync, 0);
    do_check_empty(engine._store.items);

    let error;
    try {
      engine.sync();
    } catch (ex) {
      error = ex;
    }
    do_check_true(!!error);

    // Only the first two batches have been applied.
    do_check_eq(Object.keys(engine._store.items).length,
                MOBILE_BATCH_SIZE * 2);

    // The third batch is stuck in toFetch. lastSync has been moved forward to
    // the last successful item's timestamp.
    do_check_eq(engine.toFetch.length, MOBILE_BATCH_SIZE);
    do_check_eq(engine.lastSync, collection.wbo("record-no-99").modified);

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_processIncoming_resume_toFetch() {
  _("toFetch and previousFailed items left over from previous syncs are fetched on the next sync, along with new items.");
  Service.identity.username = "foo";

  const LASTSYNC = Date.now() / 1000;

  // Server records that will be downloaded
  let collection = new ServerCollection();
  collection.insert('flying',
                    encryptPayload({id: 'flying',
                                    denomination: "LNER Class A3 4472"}));
  collection.insert('scotsman',
                    encryptPayload({id: 'scotsman',
                                    denomination: "Flying Scotsman"}));
  collection.insert('rekolok',
                    encryptPayload({id: 'rekolok',
                                    denomination: "Rekonstruktionslokomotive"}));
  for (let i = 0; i < 3; i++) {
    let id = 'failed' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + i});
    let wbo = new ServerWBO(id, payload);
    wbo.modified = LASTSYNC - 10;
    collection.insertWBO(wbo);
  }

  collection.wbo("flying").modified =
    collection.wbo("scotsman").modified = LASTSYNC - 10;
  collection._wbos.rekolok.modified = LASTSYNC + 10;

  // Time travel 10 seconds into the future but still download the above WBOs.
  let engine = makeRotaryEngine();
  engine.lastSync = LASTSYNC;
  engine.toFetch = ["flying", "scotsman"];
  engine.previousFailed = ["failed0", "failed1", "failed2"];

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {

    // Confirm initial environment
    do_check_eq(engine._store.items.flying, undefined);
    do_check_eq(engine._store.items.scotsman, undefined);
    do_check_eq(engine._store.items.rekolok, undefined);

    engine._syncStartup();
    engine._processIncoming();

    // Local records have been created from the server data.
    do_check_eq(engine._store.items.flying, "LNER Class A3 4472");
    do_check_eq(engine._store.items.scotsman, "Flying Scotsman");
    do_check_eq(engine._store.items.rekolok, "Rekonstruktionslokomotive");
    do_check_eq(engine._store.items.failed0, "Record No. 0");
    do_check_eq(engine._store.items.failed1, "Record No. 1");
    do_check_eq(engine._store.items.failed2, "Record No. 2");
    do_check_eq(engine.previousFailed.length, 0);
  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_processIncoming_applyIncomingBatchSize_smaller() {
  _("Ensure that a number of incoming items less than applyIncomingBatchSize is still applied.");
  Service.identity.username = "foo";

  // Engine that doesn't like the first and last record it's given.
  const APPLY_BATCH_SIZE = 10;
  let engine = makeRotaryEngine();
  engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
  engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
  engine._store.applyIncomingBatch = function (records) {
    let failed1 = records.shift();
    let failed2 = records.pop();
    this._applyIncomingBatch(records);
    return [failed1.id, failed2.id];
  };

  // Let's create less than a batch worth of server side records.
  let collection = new ServerCollection();
  for (let i = 0; i < APPLY_BATCH_SIZE - 1; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + id});
    collection.insert(id, payload);
  }

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {

    // Confirm initial environment
    do_check_empty(engine._store.items);

    engine._syncStartup();
    engine._processIncoming();

    // Records have been applied and the expected failures have failed.
    do_check_attribute_count(engine._store.items, APPLY_BATCH_SIZE - 1 - 2);
    do_check_eq(engine.toFetch.length, 0);
    do_check_eq(engine.previousFailed.length, 2);
    do_check_eq(engine.previousFailed[0], "record-no-0");
    do_check_eq(engine.previousFailed[1], "record-no-8");

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_processIncoming_applyIncomingBatchSize_multiple() {
  _("Ensure that incoming items are applied according to applyIncomingBatchSize.");
  Service.identity.username = "foo";

  const APPLY_BATCH_SIZE = 10;

  // Engine that applies records in batches.
  let engine = makeRotaryEngine();
  engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
  let batchCalls = 0;
  engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
  engine._store.applyIncomingBatch = function (records) {
    batchCalls += 1;
    do_check_eq(records.length, APPLY_BATCH_SIZE);
    this._applyIncomingBatch.apply(this, arguments);
  };

  // Let's create three batches worth of server side records.
  let collection = new ServerCollection();
  for (let i = 0; i < APPLY_BATCH_SIZE * 3; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + id});
    collection.insert(id, payload);
  }

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {

    // Confirm initial environment
    do_check_empty(engine._store.items);

    engine._syncStartup();
    engine._processIncoming();

    // Records have been applied in 3 batches.
    do_check_eq(batchCalls, 3);
    do_check_attribute_count(engine._store.items, APPLY_BATCH_SIZE * 3);

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_processIncoming_notify_count() {
  _("Ensure that failed records are reported only once.");
  Service.identity.username = "foo";

  const APPLY_BATCH_SIZE = 5;
  const NUMBER_OF_RECORDS = 15;

  // Engine that fails the first record of each batch.
  let engine = makeRotaryEngine();
  engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
  engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
  engine._store.applyIncomingBatch = function (records) {
    engine._store._applyIncomingBatch(records.slice(1));
    return [records[0].id];
  };

  // Create a batch of server side records.
  let collection = new ServerCollection();
  for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + id});
    collection.insert(id, payload);
  }

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {
    // Confirm initial environment.
    do_check_eq(engine.lastSync, 0);
    do_check_eq(engine.toFetch.length, 0);
    do_check_eq(engine.previousFailed.length, 0);
    do_check_empty(engine._store.items);

    let called = 0;
    let counts;
    function onApplied(count) {
      _("Called with " + JSON.stringify(count));
      counts = count;
      called++;
    }
    Svc.Obs.add("weave:engine:sync:applied", onApplied);

    // Do sync.
    engine._syncStartup();
    engine._processIncoming();

    // Confirm failures.
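    // With 15 records in batches of 5, the first record of each batch is
    // rejected, so 3 records fail and 12 are applied successfully.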
    do_check_attribute_count(engine._store.items, 12);
    do_check_eq(engine.previousFailed.length, 3);
    do_check_eq(engine.previousFailed[0], "record-no-0");
    do_check_eq(engine.previousFailed[1], "record-no-5");
    do_check_eq(engine.previousFailed[2], "record-no-10");

    // There are newly failed records and they are reported.
    do_check_eq(called, 1);
    do_check_eq(counts.failed, 3);
    do_check_eq(counts.applied, 15);
    do_check_eq(counts.newFailed, 3);
    do_check_eq(counts.succeeded, 12);

    // Sync again; 1 of the failed items is the same, the rest didn't fail.
    engine._processIncoming();

    // Confirming removed failures.
    do_check_attribute_count(engine._store.items, 14);
    do_check_eq(engine.previousFailed.length, 1);
    do_check_eq(engine.previousFailed[0], "record-no-0");

    do_check_eq(called, 2);
    do_check_eq(counts.failed, 1);
    do_check_eq(counts.applied, 3);
    do_check_eq(counts.newFailed, 0);
    do_check_eq(counts.succeeded, 2);

    Svc.Obs.remove("weave:engine:sync:applied", onApplied);
  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_processIncoming_previousFailed() {
  _("Ensure that failed records are retried.");
  Service.identity.username = "foo";
  Svc.Prefs.set("client.type", "mobile");

  const APPLY_BATCH_SIZE = 4;
  const NUMBER_OF_RECORDS = 14;

  // Engine that fails the first 2 records of each batch.
  let engine = makeRotaryEngine();
  engine.mobileGUIDFetchBatchSize = engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
  engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
  engine._store.applyIncomingBatch = function (records) {
    engine._store._applyIncomingBatch(records.slice(2));
    return [records[0].id, records[1].id];
  };

  // Create a batch of server side records.
  let collection = new ServerCollection();
  for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + i});
    collection.insert(id, payload);
  }

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {
    // Confirm initial environment.
    do_check_eq(engine.lastSync, 0);
    do_check_eq(engine.toFetch.length, 0);
    do_check_eq(engine.previousFailed.length, 0);
    do_check_empty(engine._store.items);

    // Initial failed items in previousFailed to be reset.
    let previousFailed = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
    engine.previousFailed = previousFailed;
    do_check_eq(engine.previousFailed, previousFailed);

    // Do sync.
    engine._syncStartup();
    engine._processIncoming();

    // Expected result: 4 sync batches with 2 failures each => 8 failures
    do_check_attribute_count(engine._store.items, 6);
    do_check_eq(engine.previousFailed.length, 8);
    do_check_eq(engine.previousFailed[0], "record-no-0");
    do_check_eq(engine.previousFailed[1], "record-no-1");
    do_check_eq(engine.previousFailed[2], "record-no-4");
    do_check_eq(engine.previousFailed[3], "record-no-5");
    do_check_eq(engine.previousFailed[4], "record-no-8");
    do_check_eq(engine.previousFailed[5], "record-no-9");
    do_check_eq(engine.previousFailed[6], "record-no-12");
    do_check_eq(engine.previousFailed[7], "record-no-13");

    // Sync again with the same failed items (records 0, 1, 8, 9).
    engine._processIncoming();

    // A second sync with the same failed items should not add the same items again.
    // Items that did not fail a second time should no longer be in previousFailed.
    do_check_attribute_count(engine._store.items, 10);
    do_check_eq(engine.previousFailed.length, 4);
    do_check_eq(engine.previousFailed[0], "record-no-0");
    do_check_eq(engine.previousFailed[1], "record-no-1");
    do_check_eq(engine.previousFailed[2], "record-no-8");
    do_check_eq(engine.previousFailed[3], "record-no-9");

    // Refetched items that didn't fail the second time are in engine._store.items.
    do_check_eq(engine._store.items['record-no-4'], "Record No. 4");
    do_check_eq(engine._store.items['record-no-5'], "Record No. 5");
    do_check_eq(engine._store.items['record-no-12'], "Record No. 12");
    do_check_eq(engine._store.items['record-no-13'], "Record No. 13");
  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_processIncoming_failed_records() {
  _("Ensure that failed records from _reconcile and applyIncomingBatch are refetched.");
  Service.identity.username = "foo";

  // Let's create three and a bit batches worth of server side records.
  let collection = new ServerCollection();
  const NUMBER_OF_RECORDS = MOBILE_BATCH_SIZE * 3 + 5;
  for (let i = 0; i < NUMBER_OF_RECORDS; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + id});
    let wbo = new ServerWBO(id, payload);
    wbo.modified = Date.now()/1000 + 60 * (i - MOBILE_BATCH_SIZE * 3);
    collection.insertWBO(wbo);
  }

  // Engine that batches but likes to throw on a couple of records,
  // two in each batch: the even ones fail in reconcile, the odd ones
  // in applyIncoming.
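  // ("Even" and "odd" refer to a record's position in BOGUS_RECORDS below:
  // entries at even indices are rejected by the patched _reconcile, entries
  // at odd indices by the patched applyIncoming.)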
  const BOGUS_RECORDS = ["record-no-" + 42,
                         "record-no-" + 23,
                         "record-no-" + (42 + MOBILE_BATCH_SIZE),
                         "record-no-" + (23 + MOBILE_BATCH_SIZE),
                         "record-no-" + (42 + MOBILE_BATCH_SIZE * 2),
                         "record-no-" + (23 + MOBILE_BATCH_SIZE * 2),
                         "record-no-" + (2 + MOBILE_BATCH_SIZE * 3),
                         "record-no-" + (1 + MOBILE_BATCH_SIZE * 3)];
  let engine = makeRotaryEngine();
  engine.applyIncomingBatchSize = MOBILE_BATCH_SIZE;

  engine.__reconcile = engine._reconcile;
  engine._reconcile = function _reconcile(record) {
    if (BOGUS_RECORDS.indexOf(record.id) % 2 == 0) {
      throw "I don't like this record! Baaaaaah!";
    }
    return this.__reconcile.apply(this, arguments);
  };
  engine._store._applyIncoming = engine._store.applyIncoming;
  engine._store.applyIncoming = function (record) {
    if (BOGUS_RECORDS.indexOf(record.id) % 2 == 1) {
      throw "I don't like this record! Baaaaaah!";
    }
    return this._applyIncoming.apply(this, arguments);
  };

  // Keep track of requests made of a collection.
  let count = 0;
  let uris = [];
  function recording_handler(collection) {
    let h = collection.handler();
    return function(req, res) {
      ++count;
      uris.push(req.path + "?" + req.queryString);
      return h(req, res);
    };
  }
  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": recording_handler(collection)
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    // Confirm initial environment
    do_check_eq(engine.lastSync, 0);
    do_check_eq(engine.toFetch.length, 0);
    do_check_eq(engine.previousFailed.length, 0);
    do_check_empty(engine._store.items);

    let observerSubject;
    let observerData;
    Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
      Svc.Obs.remove("weave:engine:sync:applied", onApplied);
      observerSubject = subject;
      observerData = data;
    });

    engine._syncStartup();
    engine._processIncoming();

    // Ensure that all records but the bogus ones have been applied.
    do_check_attribute_count(engine._store.items,
                             NUMBER_OF_RECORDS - BOGUS_RECORDS.length);

    // Ensure that the bogus records will be fetched again on the next sync.
    do_check_eq(engine.previousFailed.length, BOGUS_RECORDS.length);
    engine.previousFailed.sort();
    BOGUS_RECORDS.sort();
    for (let i = 0; i < engine.previousFailed.length; i++) {
      do_check_eq(engine.previousFailed[i], BOGUS_RECORDS[i]);
    }

    // Ensure the observer was notified
    do_check_eq(observerData, engine.name);
    do_check_eq(observerSubject.failed, BOGUS_RECORDS.length);
    do_check_eq(observerSubject.newFailed, BOGUS_RECORDS.length);

    // Testing batching of failed item fetches.
    // Try to sync again. Ensure that we split the request into chunks to avoid
    // URI length limitations.
    function batchDownload(batchSize) {
      count = 0;
      uris = [];
      engine.guidFetchBatchSize = batchSize;
      engine._processIncoming();
      _("Tried again. Requests: " + count + "; URIs: " + JSON.stringify(uris));
      return count;
    }

    // There are 8 bad records, so this needs 3 fetches.
    _("Test batching with ID batch size 3, normal mobile batch size.");
    do_check_eq(batchDownload(3), 3);

    // Now see with a more realistic limit.
    _("Test batching with sufficient ID batch size.");
    do_check_eq(batchDownload(BOGUS_RECORDS.length), 1);

    // If we're on mobile, that limit is used by default.
    _("Test batching with tiny mobile batch size.");
    Svc.Prefs.set("client.type", "mobile");
    engine.mobileGUIDFetchBatchSize = 2;
    do_check_eq(batchDownload(BOGUS_RECORDS.length), 4);

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_processIncoming_decrypt_failed() {
  _("Ensure that records failing to decrypt are either replaced or refetched.");

  Service.identity.username = "foo";

  // Some good and some bogus records. Two don't contain valid JSON,
  // two more will throw during decrypt.
  let collection = new ServerCollection();
  collection._wbos.flying = new ServerWBO(
      'flying', encryptPayload({id: 'flying',
                                denomination: "LNER Class A3 4472"}));
  collection._wbos.nojson = new ServerWBO("nojson", "This is invalid JSON");
  collection._wbos.nojson2 = new ServerWBO("nojson2", "This is invalid JSON");
  collection._wbos.scotsman = new ServerWBO(
      'scotsman', encryptPayload({id: 'scotsman',
                                  denomination: "Flying Scotsman"}));
  collection._wbos.nodecrypt = new ServerWBO("nodecrypt", "Decrypt this!");
  collection._wbos.nodecrypt2 = new ServerWBO("nodecrypt2", "Decrypt this!");

  // Patch the fake crypto service to throw on the records above.
  Svc.Crypto._decrypt = Svc.Crypto.decrypt;
  Svc.Crypto.decrypt = function (ciphertext) {
    if (ciphertext == "Decrypt this!") {
      throw "Derp! Cipher finalized failed. Im ur crypto destroyin ur recordz.";
    }
    return this._decrypt.apply(this, arguments);
  };

  // Some broken records also exist locally.
  let engine = makeRotaryEngine();
  engine.enabled = true;
  engine._store.items = {nojson: "Valid JSON",
                         nodecrypt: "Valid ciphertext"};

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {

    // Confirm initial state
    do_check_eq(engine.toFetch.length, 0);
    do_check_eq(engine.previousFailed.length, 0);

    let observerSubject;
    let observerData;
    Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
      Svc.Obs.remove("weave:engine:sync:applied", onApplied);
      observerSubject = subject;
      observerData = data;
    });

    engine.lastSync = collection.wbo("nojson").modified - 1;
    engine.sync();

    do_check_eq(engine.previousFailed.length, 4);
    do_check_eq(engine.previousFailed[0], "nojson");
    do_check_eq(engine.previousFailed[1], "nojson2");
    do_check_eq(engine.previousFailed[2], "nodecrypt");
    do_check_eq(engine.previousFailed[3], "nodecrypt2");

    // Ensure the observer was notified
    do_check_eq(observerData, engine.name);
    do_check_eq(observerSubject.applied, 2);
    do_check_eq(observerSubject.failed, 4);

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_uploadOutgoing_toEmptyServer() {
  _("SyncEngine._uploadOutgoing uploads new records to server");

  Service.identity.username = "foo";
  let collection = new ServerCollection();
  collection._wbos.flying = new ServerWBO('flying');
  collection._wbos.scotsman = new ServerWBO('scotsman');

  let server = sync_httpd_setup({
      "/1.1/foo/storage/rotary": collection.handler(),
      "/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
      "/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  generateNewKeys(Service.collectionKeys);

  let engine = makeRotaryEngine();
  engine.lastSync = 123; // needs to be non-zero so that tracker is queried
  engine._store.items = {flying: "LNER Class A3 4472",
                         scotsman: "Flying Scotsman"};
  // Mark one of these records as changed
  engine._tracker.addChangedID('scotsman', 0);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    // Confirm initial environment
    do_check_eq(engine.lastSyncLocal, 0);
    do_check_eq(collection.payload("flying"), undefined);
    do_check_eq(collection.payload("scotsman"), undefined);

    engine._syncStartup();
michael@0: engine._uploadOutgoing(); michael@0: michael@0: // Local timestamp has been set. michael@0: do_check_true(engine.lastSyncLocal > 0); michael@0: michael@0: // Ensure the marked record ('scotsman') has been uploaded and is michael@0: // no longer marked. michael@0: do_check_eq(collection.payload("flying"), undefined); michael@0: do_check_true(!!collection.payload("scotsman")); michael@0: do_check_eq(JSON.parse(collection.wbo("scotsman").data.ciphertext).id, michael@0: "scotsman"); michael@0: do_check_eq(engine._tracker.changedIDs["scotsman"], undefined); michael@0: michael@0: // The 'flying' record wasn't marked so it wasn't uploaded michael@0: do_check_eq(collection.payload("flying"), undefined); michael@0: michael@0: } finally { michael@0: cleanAndGo(server); michael@0: } michael@0: }); michael@0: michael@0: michael@0: add_test(function test_uploadOutgoing_failed() { michael@0: _("SyncEngine._uploadOutgoing doesn't clear the tracker of objects that failed to upload."); michael@0: michael@0: Service.identity.username = "foo"; michael@0: let collection = new ServerCollection(); michael@0: // We only define the "flying" WBO on the server, not the "scotsman" michael@0: // and "peppercorn" ones. michael@0: collection._wbos.flying = new ServerWBO('flying'); michael@0: michael@0: let server = sync_httpd_setup({ michael@0: "/1.1/foo/storage/rotary": collection.handler() michael@0: }); michael@0: michael@0: let syncTesting = new SyncTestingInfrastructure(server); michael@0: michael@0: let engine = makeRotaryEngine(); michael@0: engine.lastSync = 123; // needs to be non-zero so that tracker is queried michael@0: engine._store.items = {flying: "LNER Class A3 4472", michael@0: scotsman: "Flying Scotsman", michael@0: peppercorn: "Peppercorn Class"}; michael@0: // Mark these records as changed michael@0: const FLYING_CHANGED = 12345; michael@0: const SCOTSMAN_CHANGED = 23456; michael@0: const PEPPERCORN_CHANGED = 34567; michael@0: engine._tracker.addChangedID('flying', FLYING_CHANGED); michael@0: engine._tracker.addChangedID('scotsman', SCOTSMAN_CHANGED); michael@0: engine._tracker.addChangedID('peppercorn', PEPPERCORN_CHANGED); michael@0: michael@0: let meta_global = Service.recordManager.set(engine.metaURL, michael@0: new WBORecord(engine.metaURL)); michael@0: meta_global.payload.engines = {rotary: {version: engine.version, michael@0: syncID: engine.syncID}}; michael@0: michael@0: try { michael@0: michael@0: // Confirm initial environment michael@0: do_check_eq(engine.lastSyncLocal, 0); michael@0: do_check_eq(collection.payload("flying"), undefined); michael@0: do_check_eq(engine._tracker.changedIDs['flying'], FLYING_CHANGED); michael@0: do_check_eq(engine._tracker.changedIDs['scotsman'], SCOTSMAN_CHANGED); michael@0: do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED); michael@0: michael@0: engine.enabled = true; michael@0: engine.sync(); michael@0: michael@0: // Local timestamp has been set. michael@0: do_check_true(engine.lastSyncLocal > 0); michael@0: michael@0: // Ensure the 'flying' record has been uploaded and is no longer marked. michael@0: do_check_true(!!collection.payload("flying")); michael@0: do_check_eq(engine._tracker.changedIDs['flying'], undefined); michael@0: michael@0: // The 'scotsman' and 'peppercorn' records couldn't be uploaded so michael@0: // they weren't cleared from the tracker. 
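// In other words: an upload pass should only drop the IDs the server actually
// accepted from the tracker, leaving failed ones to be retried on a later
// sync. A minimal sketch of that bookkeeping (illustrative only; not the
// engine's implementation):
function clearAcknowledgedSketch(changedIDs, acknowledgedIDs) {
  for (let i = 0; i < acknowledgedIDs.length; i++) {
    delete changedIDs[acknowledgedIDs[i]];
  }
}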
michael@0: do_check_eq(engine._tracker.changedIDs['scotsman'], SCOTSMAN_CHANGED); michael@0: do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED); michael@0: michael@0: } finally { michael@0: cleanAndGo(server); michael@0: } michael@0: }); michael@0: michael@0: michael@0: add_test(function test_uploadOutgoing_MAX_UPLOAD_RECORDS() { michael@0: _("SyncEngine._uploadOutgoing uploads in batches of MAX_UPLOAD_RECORDS"); michael@0: michael@0: Service.identity.username = "foo"; michael@0: let collection = new ServerCollection(); michael@0: michael@0: // Let's count how many times the client posts to the server michael@0: var noOfUploads = 0; michael@0: collection.post = (function(orig) { michael@0: return function() { michael@0: noOfUploads++; michael@0: return orig.apply(this, arguments); michael@0: }; michael@0: }(collection.post)); michael@0: michael@0: // Create a bunch of records (and server side handlers) michael@0: let engine = makeRotaryEngine(); michael@0: for (var i = 0; i < 234; i++) { michael@0: let id = 'record-no-' + i; michael@0: engine._store.items[id] = "Record No. " + i; michael@0: engine._tracker.addChangedID(id, 0); michael@0: collection.insert(id); michael@0: } michael@0: michael@0: let meta_global = Service.recordManager.set(engine.metaURL, michael@0: new WBORecord(engine.metaURL)); michael@0: meta_global.payload.engines = {rotary: {version: engine.version, michael@0: syncID: engine.syncID}}; michael@0: michael@0: let server = sync_httpd_setup({ michael@0: "/1.1/foo/storage/rotary": collection.handler() michael@0: }); michael@0: michael@0: let syncTesting = new SyncTestingInfrastructure(server); michael@0: michael@0: try { michael@0: michael@0: // Confirm initial environment. michael@0: do_check_eq(noOfUploads, 0); michael@0: michael@0: engine._syncStartup(); michael@0: engine._uploadOutgoing(); michael@0: michael@0: // Ensure all records have been uploaded. michael@0: for (i = 0; i < 234; i++) { michael@0: do_check_true(!!collection.payload('record-no-' + i)); michael@0: } michael@0: michael@0: // Ensure that the uploads were performed in batches of MAX_UPLOAD_RECORDS. michael@0: do_check_eq(noOfUploads, Math.ceil(234/MAX_UPLOAD_RECORDS)); michael@0: michael@0: } finally { michael@0: cleanAndGo(server); michael@0: } michael@0: }); michael@0: michael@0: michael@0: add_test(function test_syncFinish_noDelete() { michael@0: _("SyncEngine._syncFinish resets tracker's score"); michael@0: michael@0: let server = httpd_setup({}); michael@0: michael@0: let syncTesting = new SyncTestingInfrastructure(server); michael@0: let engine = makeRotaryEngine(); michael@0: engine._delete = {}; // Nothing to delete michael@0: engine._tracker.score = 100; michael@0: michael@0: // _syncFinish() will reset the engine's score. 
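  // Setting _tracker.score = 100 above and expecting engine.score == 0 below
  // relies on the engine surfacing its tracker's score; _syncFinish() is
  // expected to clear that counter.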
michael@0: engine._syncFinish(); michael@0: do_check_eq(engine.score, 0); michael@0: server.stop(run_next_test); michael@0: }); michael@0: michael@0: michael@0: add_test(function test_syncFinish_deleteByIds() { michael@0: _("SyncEngine._syncFinish deletes server records slated for deletion (list of record IDs)."); michael@0: michael@0: Service.identity.username = "foo"; michael@0: let collection = new ServerCollection(); michael@0: collection._wbos.flying = new ServerWBO( michael@0: 'flying', encryptPayload({id: 'flying', michael@0: denomination: "LNER Class A3 4472"})); michael@0: collection._wbos.scotsman = new ServerWBO( michael@0: 'scotsman', encryptPayload({id: 'scotsman', michael@0: denomination: "Flying Scotsman"})); michael@0: collection._wbos.rekolok = new ServerWBO( michael@0: 'rekolok', encryptPayload({id: 'rekolok', michael@0: denomination: "Rekonstruktionslokomotive"})); michael@0: michael@0: let server = httpd_setup({ michael@0: "/1.1/foo/storage/rotary": collection.handler() michael@0: }); michael@0: let syncTesting = new SyncTestingInfrastructure(server); michael@0: michael@0: let engine = makeRotaryEngine(); michael@0: try { michael@0: engine._delete = {ids: ['flying', 'rekolok']}; michael@0: engine._syncFinish(); michael@0: michael@0: // The 'flying' and 'rekolok' records were deleted while the michael@0: // 'scotsman' one wasn't. michael@0: do_check_eq(collection.payload("flying"), undefined); michael@0: do_check_true(!!collection.payload("scotsman")); michael@0: do_check_eq(collection.payload("rekolok"), undefined); michael@0: michael@0: // The deletion todo list has been reset. michael@0: do_check_eq(engine._delete.ids, undefined); michael@0: michael@0: } finally { michael@0: cleanAndGo(server); michael@0: } michael@0: }); michael@0: michael@0: michael@0: add_test(function test_syncFinish_deleteLotsInBatches() { michael@0: _("SyncEngine._syncFinish deletes server records in batches of 100 (list of record IDs)."); michael@0: michael@0: Service.identity.username = "foo"; michael@0: let collection = new ServerCollection(); michael@0: michael@0: // Let's count how many times the client does a DELETE request to the server michael@0: var noOfUploads = 0; michael@0: collection.delete = (function(orig) { michael@0: return function() { michael@0: noOfUploads++; michael@0: return orig.apply(this, arguments); michael@0: }; michael@0: }(collection.delete)); michael@0: michael@0: // Create a bunch of records on the server michael@0: let now = Date.now(); michael@0: for (var i = 0; i < 234; i++) { michael@0: let id = 'record-no-' + i; michael@0: let payload = encryptPayload({id: id, denomination: "Record No. " + i}); michael@0: let wbo = new ServerWBO(id, payload); michael@0: wbo.modified = now / 1000 - 60 * (i + 110); michael@0: collection.insertWBO(wbo); michael@0: } michael@0: michael@0: let server = httpd_setup({ michael@0: "/1.1/foo/storage/rotary": collection.handler() michael@0: }); michael@0: michael@0: let syncTesting = new SyncTestingInfrastructure(server); michael@0: michael@0: let engine = makeRotaryEngine(); michael@0: try { michael@0: michael@0: // Confirm initial environment michael@0: do_check_eq(noOfUploads, 0); michael@0: michael@0: // Declare what we want to have deleted: all records no. 100 and michael@0: // up and all records that are less than 200 mins old (which are michael@0: // records 0 thru 90). 
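// Spelling out the arithmetic: record no. i was stamped (i + 110) minutes ago,
// so the `newer` cutoff of 200.5 minutes below catches records 0 through 90,
// and the explicit ID list covers records 100 through 233 -- leaving only
// records 91 through 99 on the server. With the 100-ID batch limit named in
// this test's description, deleting 134 IDs takes two DELETE requests, plus
// one more for the `newer` cutoff. A small sanity-check sketch (illustrative
// only; `expectedDeleteRequests` is a hypothetical helper):
function expectedDeleteRequests(idCount, idsPerRequest, hasNewerCutoff) {
  return Math.ceil(idCount / idsPerRequest) + (hasNewerCutoff ? 1 : 0);
}
// expectedDeleteRequests(134, 100, true) == 3, matching the noOfUploads
// assertion further down.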
michael@0:     engine._delete = {ids: [],
michael@0:                       newer: now / 1000 - 60 * 200.5};
michael@0:     for (i = 100; i < 234; i++) {
michael@0:       engine._delete.ids.push('record-no-' + i);
michael@0:     }
michael@0:
michael@0:     engine._syncFinish();
michael@0:
michael@0:     // Ensure that the appropriate server data has been wiped while
michael@0:     // preserving records 91 thru 99.
michael@0:     for (i = 0; i < 234; i++) {
michael@0:       let id = 'record-no-' + i;
michael@0:       if (i <= 90 || i >= 100) {
michael@0:         do_check_eq(collection.payload(id), undefined);
michael@0:       } else {
michael@0:         do_check_true(!!collection.payload(id));
michael@0:       }
michael@0:     }
michael@0:
michael@0:     // The deletion was done in batches: two DELETE requests for the
michael@0:     // 134 IDs plus one for the `newer` cutoff.
michael@0:     do_check_eq(noOfUploads, 2 + 1);
michael@0:
michael@0:     // The deletion todo list has been reset.
michael@0:     do_check_eq(engine._delete.ids, undefined);
michael@0:
michael@0:   } finally {
michael@0:     cleanAndGo(server);
michael@0:   }
michael@0: });
michael@0:
michael@0:
michael@0: add_test(function test_sync_partialUpload() {
michael@0:   _("SyncEngine.sync() keeps changedIDs that couldn't be uploaded.");
michael@0:
michael@0:   Service.identity.username = "foo";
michael@0:
michael@0:   let collection = new ServerCollection();
michael@0:   let server = sync_httpd_setup({
michael@0:       "/1.1/foo/storage/rotary": collection.handler()
michael@0:   });
michael@0:   let syncTesting = new SyncTestingInfrastructure(server);
michael@0:   generateNewKeys(Service.collectionKeys);
michael@0:
michael@0:   let engine = makeRotaryEngine();
michael@0:   engine.lastSync = 123; // needs to be non-zero so that tracker is queried
michael@0:   engine.lastSyncLocal = 456;
michael@0:
michael@0:   // Let the third upload fail completely
michael@0:   var noOfUploads = 0;
michael@0:   collection.post = (function(orig) {
michael@0:     return function() {
michael@0:       if (noOfUploads == 2)
michael@0:         throw "FAIL!";
michael@0:       noOfUploads++;
michael@0:       return orig.apply(this, arguments);
michael@0:     };
michael@0:   }(collection.post));
michael@0:
michael@0:   // Create a bunch of records (and server side handlers)
michael@0:   for (let i = 0; i < 234; i++) {
michael@0:     let id = 'record-no-' + i;
michael@0:     engine._store.items[id] = "Record No. " + i;
michael@0:     engine._tracker.addChangedID(id, i);
michael@0:     // Let two items in the first upload batch fail.
michael@0:     if ((i != 23) && (i != 42)) {
michael@0:       collection.insert(id);
michael@0:     }
michael@0:   }
michael@0:
michael@0:   let meta_global = Service.recordManager.set(engine.metaURL,
michael@0:                                               new WBORecord(engine.metaURL));
michael@0:   meta_global.payload.engines = {rotary: {version: engine.version,
michael@0:                                           syncID: engine.syncID}};
michael@0:
michael@0:   try {
michael@0:
michael@0:     engine.enabled = true;
michael@0:     let error;
michael@0:     try {
michael@0:       engine.sync();
michael@0:     } catch (ex) {
michael@0:       error = ex;
michael@0:     }
michael@0:     do_check_true(!!error);
michael@0:
michael@0:     // The timestamp has been updated.
michael@0:     do_check_true(engine.lastSyncLocal > 456);
michael@0:
michael@0:     for (let i = 0; i < 234; i++) {
michael@0:       let id = 'record-no-' + i;
michael@0:       // Ensure failed records are back in the tracker:
michael@0:       // * records no. 23 and 42 were rejected by the server,
michael@0:       // * records no. 200 and higher couldn't be uploaded because we failed
michael@0:       //   hard on the 3rd upload.
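      // Why exactly these IDs: nos. 23 and 42 were never insert()ed into the
      // server collection above, so the first (otherwise successful) POST
      // reports them as failed, while the wrapped collection.post throws on
      // the third call, so nos. 200 and up never reach the server at all.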
michael@0: if ((i == 23) || (i == 42) || (i >= 200)) michael@0: do_check_eq(engine._tracker.changedIDs[id], i); michael@0: else michael@0: do_check_false(id in engine._tracker.changedIDs); michael@0: } michael@0: michael@0: } finally { michael@0: cleanAndGo(server); michael@0: } michael@0: }); michael@0: michael@0: add_test(function test_canDecrypt_noCryptoKeys() { michael@0: _("SyncEngine.canDecrypt returns false if the engine fails to decrypt items on the server, e.g. due to a missing crypto key collection."); michael@0: Service.identity.username = "foo"; michael@0: michael@0: // Wipe collection keys so we can test the desired scenario. michael@0: Service.collectionKeys.clear(); michael@0: michael@0: let collection = new ServerCollection(); michael@0: collection._wbos.flying = new ServerWBO( michael@0: 'flying', encryptPayload({id: 'flying', michael@0: denomination: "LNER Class A3 4472"})); michael@0: michael@0: let server = sync_httpd_setup({ michael@0: "/1.1/foo/storage/rotary": collection.handler() michael@0: }); michael@0: michael@0: let syncTesting = new SyncTestingInfrastructure(server); michael@0: let engine = makeRotaryEngine(); michael@0: try { michael@0: michael@0: do_check_false(engine.canDecrypt()); michael@0: michael@0: } finally { michael@0: cleanAndGo(server); michael@0: } michael@0: }); michael@0: michael@0: add_test(function test_canDecrypt_true() { michael@0: _("SyncEngine.canDecrypt returns true if the engine can decrypt the items on the server."); michael@0: Service.identity.username = "foo"; michael@0: michael@0: generateNewKeys(Service.collectionKeys); michael@0: michael@0: let collection = new ServerCollection(); michael@0: collection._wbos.flying = new ServerWBO( michael@0: 'flying', encryptPayload({id: 'flying', michael@0: denomination: "LNER Class A3 4472"})); michael@0: michael@0: let server = sync_httpd_setup({ michael@0: "/1.1/foo/storage/rotary": collection.handler() michael@0: }); michael@0: michael@0: let syncTesting = new SyncTestingInfrastructure(server); michael@0: let engine = makeRotaryEngine(); michael@0: try { michael@0: michael@0: do_check_true(engine.canDecrypt()); michael@0: michael@0: } finally { michael@0: cleanAndGo(server); michael@0: } michael@0: michael@0: }); michael@0: michael@0: add_test(function test_syncapplied_observer() { michael@0: Service.identity.username = "foo"; michael@0: michael@0: const NUMBER_OF_RECORDS = 10; michael@0: michael@0: let engine = makeRotaryEngine(); michael@0: michael@0: // Create a batch of server side records. michael@0: let collection = new ServerCollection(); michael@0: for (var i = 0; i < NUMBER_OF_RECORDS; i++) { michael@0: let id = 'record-no-' + i; michael@0: let payload = encryptPayload({id: id, denomination: "Record No. 
" + id}); michael@0: collection.insert(id, payload); michael@0: } michael@0: michael@0: let server = httpd_setup({ michael@0: "/1.1/foo/storage/rotary": collection.handler() michael@0: }); michael@0: michael@0: let syncTesting = new SyncTestingInfrastructure(server); michael@0: michael@0: let meta_global = Service.recordManager.set(engine.metaURL, michael@0: new WBORecord(engine.metaURL)); michael@0: meta_global.payload.engines = {rotary: {version: engine.version, michael@0: syncID: engine.syncID}}; michael@0: michael@0: let numApplyCalls = 0; michael@0: let engine_name; michael@0: let count; michael@0: function onApplied(subject, data) { michael@0: numApplyCalls++; michael@0: engine_name = data; michael@0: count = subject; michael@0: } michael@0: michael@0: Svc.Obs.add("weave:engine:sync:applied", onApplied); michael@0: michael@0: try { michael@0: Service.scheduler.hasIncomingItems = false; michael@0: michael@0: // Do sync. michael@0: engine._syncStartup(); michael@0: engine._processIncoming(); michael@0: michael@0: do_check_attribute_count(engine._store.items, 10); michael@0: michael@0: do_check_eq(numApplyCalls, 1); michael@0: do_check_eq(engine_name, "rotary"); michael@0: do_check_eq(count.applied, 10); michael@0: michael@0: do_check_true(Service.scheduler.hasIncomingItems); michael@0: } finally { michael@0: cleanAndGo(server); michael@0: Service.scheduler.hasIncomingItems = false; michael@0: Svc.Obs.remove("weave:engine:sync:applied", onApplied); michael@0: } michael@0: });