services/sync/tests/unit/test_syncengine_sync.js

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Wed, 31 Dec 2014 07:22:50 +0100
branch:      TOR_BUG_3246
changeset:   4:fc2d59ddac77
permissions: -rw-r--r--

Correct previous dual key logic pending first delivery installment.

michael@0 1 /* Any copyright is dedicated to the Public Domain.
michael@0 2 * http://creativecommons.org/publicdomain/zero/1.0/ */
michael@0 3
michael@0 4 Cu.import("resource://services-sync/constants.js");
michael@0 5 Cu.import("resource://services-sync/engines.js");
michael@0 6 Cu.import("resource://services-sync/policies.js");
michael@0 7 Cu.import("resource://services-sync/record.js");
michael@0 8 Cu.import("resource://services-sync/resource.js");
michael@0 9 Cu.import("resource://services-sync/service.js");
michael@0 10 Cu.import("resource://services-sync/util.js");
michael@0 11 Cu.import("resource://testing-common/services/sync/rotaryengine.js");
michael@0 12 Cu.import("resource://testing-common/services/sync/utils.js");
michael@0 13
michael@0 14 function makeRotaryEngine() {
michael@0 15 return new RotaryEngine(Service);
michael@0 16 }
michael@0 17
michael@0 18 function cleanAndGo(server) {
michael@0 19 Svc.Prefs.resetBranch("");
michael@0 20 Svc.Prefs.set("log.logger.engine.rotary", "Trace");
michael@0 21 Service.recordManager.clearCache();
michael@0 22 server.stop(run_next_test);
michael@0 23 }
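// Shared teardown: reset all prefs (then re-enable trace logging for the next
// test), drop cached records and stop the server before moving on.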
michael@0 24
michael@0 25 function configureService(server, username, password) {
michael@0 26 Service.clusterURL = server.baseURI;
michael@0 27
michael@0 28 Service.identity.account = username || "foo";
michael@0 29 Service.identity.basicPassword = password || "password";
michael@0 30 }
michael@0 31
michael@0 32 function createServerAndConfigureClient() {
michael@0 33 let engine = new RotaryEngine(Service);
michael@0 34
michael@0 35 let contents = {
michael@0 36 meta: {global: {engines: {rotary: {version: engine.version,
michael@0 37 syncID: engine.syncID}}}},
michael@0 38 crypto: {},
michael@0 39 rotary: {}
michael@0 40 };
michael@0 41
michael@0 42 const USER = "foo";
michael@0 43 let server = new SyncServer();
michael@0 44 server.registerUser(USER, "password");
michael@0 45 server.createContents(USER, contents);
michael@0 46 server.start();
michael@0 47
michael@0 48 Service.serverURL = server.baseURI;
michael@0 49 Service.clusterURL = server.baseURI;
michael@0 50 Service.identity.username = USER;
michael@0 51 Service._updateCachedURLs();
michael@0 52
michael@0 53 return [engine, server, USER];
michael@0 54 }
michael@0 55
michael@0 56 function run_test() {
michael@0 57 generateNewKeys(Service.collectionKeys);
michael@0 58 Svc.Prefs.set("log.logger.engine.rotary", "Trace");
michael@0 59 run_next_test();
michael@0 60 }
michael@0 61
michael@0 62 /*
michael@0 63 * Tests
michael@0 64 *
michael@0 65 * SyncEngine._sync() is divided into four rather independent steps:
michael@0 66 *
michael@0 67 * - _syncStartup()
michael@0 68 * - _processIncoming()
michael@0 69 * - _uploadOutgoing()
michael@0 70 * - _syncFinish()
michael@0 71 *
michael@0 72 * In the spirit of unit testing, these are tested individually for
michael@0 73 * different scenarios below.
michael@0 74 */
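// For orientation, a full engine sync roughly strings these steps together
// (a sketch, not the exact implementation):
//
//   engine._syncStartup();     // check meta/global: engine versions, syncIDs
//   engine._processIncoming(); // download and apply server records
//   engine._uploadOutgoing();  // upload locally new/changed records
//   engine._syncFinish();      // post-sync cleanup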
michael@0 75
michael@0 76 add_test(function test_syncStartup_emptyOrOutdatedGlobalsResetsSync() {
michael@0 77 _("SyncEngine._syncStartup resets sync and wipes server data if there's no or an outdated global record");
michael@0 78
michael@0 79 // Some server side data that's going to be wiped
michael@0 80 let collection = new ServerCollection();
michael@0 81 collection.insert('flying',
michael@0 82 encryptPayload({id: 'flying',
michael@0 83 denomination: "LNER Class A3 4472"}));
michael@0 84 collection.insert('scotsman',
michael@0 85 encryptPayload({id: 'scotsman',
michael@0 86 denomination: "Flying Scotsman"}));
michael@0 87
michael@0 88 let server = sync_httpd_setup({
michael@0 89 "/1.1/foo/storage/rotary": collection.handler()
michael@0 90 });
michael@0 91
michael@0 92 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 93 Service.identity.username = "foo";
michael@0 94
michael@0 95 let engine = makeRotaryEngine();
michael@0 96 engine._store.items = {rekolok: "Rekonstruktionslokomotive"};
michael@0 97 try {
michael@0 98
michael@0 99 // Confirm initial environment
michael@0 100 do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);
michael@0 101 let metaGlobal = Service.recordManager.get(engine.metaURL);
michael@0 102 do_check_eq(metaGlobal.payload.engines, undefined);
michael@0 103 do_check_true(!!collection.payload("flying"));
michael@0 104 do_check_true(!!collection.payload("scotsman"));
michael@0 105
michael@0 106 engine.lastSync = Date.now() / 1000;
michael@0 107 engine.lastSyncLocal = Date.now();
michael@0 108
michael@0 109 // Trying to prompt a wipe -- we no longer track CryptoMeta per engine,
michael@0 110 // so it has nothing to check.
michael@0 111 engine._syncStartup();
michael@0 112
michael@0 113 // The meta/global WBO has been filled with data about the engine
michael@0 114 let engineData = metaGlobal.payload.engines["rotary"];
michael@0 115 do_check_eq(engineData.version, engine.version);
michael@0 116 do_check_eq(engineData.syncID, engine.syncID);
michael@0 117
michael@0 118 // Sync was reset and server data was wiped
michael@0 119 do_check_eq(engine.lastSync, 0);
michael@0 120 do_check_eq(collection.payload("flying"), undefined);
michael@0 121 do_check_eq(collection.payload("scotsman"), undefined);
michael@0 122
michael@0 123 } finally {
michael@0 124 cleanAndGo(server);
michael@0 125 }
michael@0 126 });
michael@0 127
michael@0 128 add_test(function test_syncStartup_serverHasNewerVersion() {
michael@0 129 _("SyncEngine._syncStartup ");
michael@0 130
michael@0 131 let global = new ServerWBO('global', {engines: {rotary: {version: 23456}}});
michael@0 132 let server = httpd_setup({
michael@0 133 "/1.1/foo/storage/meta/global": global.handler()
michael@0 134 });
michael@0 135
michael@0 136 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 137 Service.identity.username = "foo";
michael@0 138
michael@0 139 let engine = makeRotaryEngine();
michael@0 140 try {
michael@0 141
michael@0 142 // The server has a newer version of the data than our engine can
michael@0 143 // handle. That should give us an exception.
michael@0 144 let error;
michael@0 145 try {
michael@0 146 engine._syncStartup();
michael@0 147 } catch (ex) {
michael@0 148 error = ex;
michael@0 149 }
michael@0 150 do_check_eq(error.failureCode, VERSION_OUT_OF_DATE);
michael@0 151
michael@0 152 } finally {
michael@0 153 cleanAndGo(server);
michael@0 154 }
michael@0 155 });
michael@0 156
michael@0 157
michael@0 158 add_test(function test_syncStartup_syncIDMismatchResetsClient() {
michael@0 159 _("SyncEngine._syncStartup resets sync if syncIDs don't match");
michael@0 160
michael@0 161 let server = sync_httpd_setup({});
michael@0 162 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 163 Service.identity.username = "foo";
michael@0 164
michael@0 165 // global record with a different syncID than our engine has
michael@0 166 let engine = makeRotaryEngine();
michael@0 167 let global = new ServerWBO('global',
michael@0 168 {engines: {rotary: {version: engine.version,
michael@0 169 syncID: 'foobar'}}});
michael@0 170 server.registerPathHandler("/1.1/foo/storage/meta/global", global.handler());
michael@0 171
michael@0 172 try {
michael@0 173
michael@0 174 // Confirm initial environment
michael@0 175 do_check_eq(engine.syncID, 'fake-guid-0');
michael@0 176 do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);
michael@0 177
michael@0 178 engine.lastSync = Date.now() / 1000;
michael@0 179 engine.lastSyncLocal = Date.now();
michael@0 180 engine._syncStartup();
michael@0 181
michael@0 182 // The engine has assumed the server's syncID
michael@0 183 do_check_eq(engine.syncID, 'foobar');
michael@0 184
michael@0 185 // Sync was reset
michael@0 186 do_check_eq(engine.lastSync, 0);
michael@0 187
michael@0 188 } finally {
michael@0 189 cleanAndGo(server);
michael@0 190 }
michael@0 191 });
michael@0 192
michael@0 193
michael@0 194 add_test(function test_processIncoming_emptyServer() {
michael@0 195 _("SyncEngine._processIncoming working with an empty server backend");
michael@0 196
michael@0 197 let collection = new ServerCollection();
michael@0 198 let server = sync_httpd_setup({
michael@0 199 "/1.1/foo/storage/rotary": collection.handler()
michael@0 200 });
michael@0 201
michael@0 202 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 203 Service.identity.username = "foo";
michael@0 204
michael@0 205 let engine = makeRotaryEngine();
michael@0 206 try {
michael@0 207
michael@0 208 // Merely ensure that this code path is run without any errors
michael@0 209 engine._processIncoming();
michael@0 210 do_check_eq(engine.lastSync, 0);
michael@0 211
michael@0 212 } finally {
michael@0 213 cleanAndGo(server);
michael@0 214 }
michael@0 215 });
michael@0 216
michael@0 217
michael@0 218 add_test(function test_processIncoming_createFromServer() {
michael@0 219 _("SyncEngine._processIncoming creates new records from server data");
michael@0 220
michael@0 221 // Some server records that will be downloaded
michael@0 222 let collection = new ServerCollection();
michael@0 223 collection.insert('flying',
michael@0 224 encryptPayload({id: 'flying',
michael@0 225 denomination: "LNER Class A3 4472"}));
michael@0 226 collection.insert('scotsman',
michael@0 227 encryptPayload({id: 'scotsman',
michael@0 228 denomination: "Flying Scotsman"}));
michael@0 229
michael@0 230 // A pathological case involving a relative URI gone wrong.
michael@0 231 let pathologicalPayload = encryptPayload({id: '../pathological',
michael@0 232 denomination: "Pathological Case"});
michael@0 233 collection.insert('../pathological', pathologicalPayload);
michael@0 234
michael@0 235 let server = sync_httpd_setup({
michael@0 236 "/1.1/foo/storage/rotary": collection.handler(),
michael@0 237 "/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
michael@0 238 "/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
michael@0 239 });
michael@0 240
michael@0 241 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 242 Service.identity.username = "foo";
michael@0 243
michael@0 244 generateNewKeys(Service.collectionKeys);
michael@0 245
michael@0 246 let engine = makeRotaryEngine();
michael@0 247 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 248 new WBORecord(engine.metaURL));
michael@0 249 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 250 syncID: engine.syncID}};
michael@0 251
michael@0 252 try {
michael@0 253
michael@0 254 // Confirm initial environment
michael@0 255 do_check_eq(engine.lastSync, 0);
michael@0 256 do_check_eq(engine.lastModified, null);
michael@0 257 do_check_eq(engine._store.items.flying, undefined);
michael@0 258 do_check_eq(engine._store.items.scotsman, undefined);
michael@0 259 do_check_eq(engine._store.items['../pathological'], undefined);
michael@0 260
michael@0 261 engine._syncStartup();
michael@0 262 engine._processIncoming();
michael@0 263
michael@0 264 // Timestamps of last sync and last server modification are set.
michael@0 265 do_check_true(engine.lastSync > 0);
michael@0 266 do_check_true(engine.lastModified > 0);
michael@0 267
michael@0 268 // Local records have been created from the server data.
michael@0 269 do_check_eq(engine._store.items.flying, "LNER Class A3 4472");
michael@0 270 do_check_eq(engine._store.items.scotsman, "Flying Scotsman");
michael@0 271 do_check_eq(engine._store.items['../pathological'], "Pathological Case");
michael@0 272
michael@0 273 } finally {
michael@0 274 cleanAndGo(server);
michael@0 275 }
michael@0 276 });
michael@0 277
michael@0 278
michael@0 279 add_test(function test_processIncoming_reconcile() {
michael@0 280 _("SyncEngine._processIncoming updates local records");
michael@0 281
michael@0 282 let collection = new ServerCollection();
michael@0 283
michael@0 284 // This record does not exist on the client yet, so a new local
michael@0 285 // record will be created from it.
michael@0 286 collection.insert('newrecord',
michael@0 287 encryptPayload({id: 'newrecord',
michael@0 288 denomination: "New stuff..."}));
michael@0 289
michael@0 290 // This server record is newer than the corresponding client one,
michael@0 291 // so it'll update its data.
michael@0 292 collection.insert('newerserver',
michael@0 293 encryptPayload({id: 'newerserver',
michael@0 294 denomination: "New data!"}));
michael@0 295
michael@0 296 // This server record is 2 mins older than the client counterpart
michael@0 297 // but identical to it, so we're expecting the client record's
michael@0 298 // changedID to be reset.
michael@0 299 collection.insert('olderidentical',
michael@0 300 encryptPayload({id: 'olderidentical',
michael@0 301 denomination: "Older but identical"}));
michael@0 302 collection._wbos.olderidentical.modified -= 120;
michael@0 303
michael@0 304 // This item simply has different data than the corresponding client
michael@0 305 // record (which is unmodified), so it will update the client as well
michael@0 306 collection.insert('updateclient',
michael@0 307 encryptPayload({id: 'updateclient',
michael@0 308 denomination: "Get this!"}));
michael@0 309
michael@0 310 // This is a dupe of 'original'.
michael@0 311 collection.insert('duplication',
michael@0 312 encryptPayload({id: 'duplication',
michael@0 313 denomination: "Original Entry"}));
michael@0 314
michael@0 315 // This record is marked as deleted, so we're expecting the client
michael@0 316 // record to be removed.
michael@0 317 collection.insert('nukeme',
michael@0 318 encryptPayload({id: 'nukeme',
michael@0 319 denomination: "Nuke me!",
michael@0 320 deleted: true}));
michael@0 321
michael@0 322 let server = sync_httpd_setup({
michael@0 323 "/1.1/foo/storage/rotary": collection.handler()
michael@0 324 });
michael@0 325
michael@0 326 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 327 Service.identity.username = "foo";
michael@0 328
michael@0 329 let engine = makeRotaryEngine();
michael@0 330 engine._store.items = {newerserver: "New data, but not as new as server!",
michael@0 331 olderidentical: "Older but identical",
michael@0 332 updateclient: "Got data?",
michael@0 333 original: "Original Entry",
michael@0 334 long_original: "Long Original Entry",
michael@0 335 nukeme: "Nuke me!"};
michael@0 336 // Make this record 1 min old, thus older than the one on the server
michael@0 337 engine._tracker.addChangedID('newerserver', Date.now()/1000 - 60);
michael@0 338 // This record has been changed 2 mins later than the one on the server
michael@0 339 engine._tracker.addChangedID('olderidentical', Date.now()/1000);
michael@0 340
michael@0 341 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 342 new WBORecord(engine.metaURL));
michael@0 343 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 344 syncID: engine.syncID}};
michael@0 345
michael@0 346 try {
michael@0 347
michael@0 348 // Confirm initial environment
michael@0 349 do_check_eq(engine._store.items.newrecord, undefined);
michael@0 350 do_check_eq(engine._store.items.newerserver, "New data, but not as new as server!");
michael@0 351 do_check_eq(engine._store.items.olderidentical, "Older but identical");
michael@0 352 do_check_eq(engine._store.items.updateclient, "Got data?");
michael@0 353 do_check_eq(engine._store.items.nukeme, "Nuke me!");
michael@0 354 do_check_true(engine._tracker.changedIDs['olderidentical'] > 0);
michael@0 355
michael@0 356 engine._syncStartup();
michael@0 357 engine._processIncoming();
michael@0 358
michael@0 359 // Timestamps of last sync and last server modification are set.
michael@0 360 do_check_true(engine.lastSync > 0);
michael@0 361 do_check_true(engine.lastModified > 0);
michael@0 362
michael@0 363 // The new record is created.
michael@0 364 do_check_eq(engine._store.items.newrecord, "New stuff...");
michael@0 365
michael@0 366 // The 'newerserver' record is updated since the server data is newer.
michael@0 367 do_check_eq(engine._store.items.newerserver, "New data!");
michael@0 368
michael@0 369 // The data for 'olderidentical' is identical on the server, so
michael@0 370 // it's no longer marked as changed.
michael@0 371 do_check_eq(engine._store.items.olderidentical, "Older but identical");
michael@0 372 do_check_eq(engine._tracker.changedIDs['olderidentical'], undefined);
michael@0 373
michael@0 374 // Updated with server data.
michael@0 375 do_check_eq(engine._store.items.updateclient, "Get this!");
michael@0 376
michael@0 377 // The incoming ID is preferred.
michael@0 378 do_check_eq(engine._store.items.original, undefined);
michael@0 379 do_check_eq(engine._store.items.duplication, "Original Entry");
michael@0 380 do_check_neq(engine._delete.ids.indexOf("original"), -1);
michael@0 381
michael@0 382 // The 'nukeme' record marked as deleted is removed.
michael@0 383 do_check_eq(engine._store.items.nukeme, undefined);
michael@0 384 } finally {
michael@0 385 cleanAndGo(server);
michael@0 386 }
michael@0 387 });
michael@0 388
michael@0 389 add_test(function test_processIncoming_reconcile_local_deleted() {
michael@0 390 _("Ensure local, duplicate ID is deleted on server.");
michael@0 391
michael@0 392 // When a duplicate is resolved, the local ID (which is never taken) should
michael@0 393 // be deleted on the server.
michael@0 394 let [engine, server, user] = createServerAndConfigureClient();
michael@0 395
michael@0 396 let now = Date.now() / 1000 - 10;
michael@0 397 engine.lastSync = now;
michael@0 398 engine.lastModified = now + 1;
michael@0 399
michael@0 400 let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
michael@0 401 let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
michael@0 402 server.insertWBO(user, "rotary", wbo);
michael@0 403
michael@0 404 let record = encryptPayload({id: "DUPE_LOCAL", denomination: "local"});
michael@0 405 let wbo = new ServerWBO("DUPE_LOCAL", record, now - 1);
michael@0 406 server.insertWBO(user, "rotary", wbo);
michael@0 407
michael@0 408 engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
michael@0 409 do_check_true(engine._store.itemExists("DUPE_LOCAL"));
michael@0 410 do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
michael@0 411
michael@0 412 engine._sync();
michael@0 413
michael@0 414 do_check_attribute_count(engine._store.items, 1);
michael@0 415 do_check_true("DUPE_INCOMING" in engine._store.items);
michael@0 416
michael@0 417 let collection = server.getCollection(user, "rotary");
michael@0 418 do_check_eq(1, collection.count());
michael@0 419 do_check_neq(undefined, collection.wbo("DUPE_INCOMING"));
michael@0 420
michael@0 421 cleanAndGo(server);
michael@0 422 });
michael@0 423
michael@0 424 add_test(function test_processIncoming_reconcile_equivalent() {
michael@0 425 _("Ensure proper handling of incoming records that match local.");
michael@0 426
michael@0 427 let [engine, server, user] = createServerAndConfigureClient();
michael@0 428
michael@0 429 let now = Date.now() / 1000 - 10;
michael@0 430 engine.lastSync = now;
michael@0 431 engine.lastModified = now + 1;
michael@0 432
michael@0 433 let record = encryptPayload({id: "entry", denomination: "denomination"});
michael@0 434 let wbo = new ServerWBO("entry", record, now + 2);
michael@0 435 server.insertWBO(user, "rotary", wbo);
michael@0 436
michael@0 437 engine._store.items = {entry: "denomination"};
michael@0 438 do_check_true(engine._store.itemExists("entry"));
michael@0 439
michael@0 440 engine._sync();
michael@0 441
michael@0 442 do_check_attribute_count(engine._store.items, 1);
michael@0 443
michael@0 444 cleanAndGo(server);
michael@0 445 });
michael@0 446
michael@0 447 add_test(function test_processIncoming_reconcile_locally_deleted_dupe_new() {
michael@0 448 _("Ensure locally deleted duplicate record newer than incoming is handled.");
michael@0 449
michael@0 450 // This is a somewhat complicated test. It ensures that if a client receives
michael@0 451 // a modified record that duplicates an item deleted locally under a different
michael@0 452 // ID, and the local deletion is newer, the incoming record is ignored. This
michael@0 453 // is a corner case for record handling, but it needs to be supported.
michael@0 454 let [engine, server, user] = createServerAndConfigureClient();
michael@0 455
michael@0 456 let now = Date.now() / 1000 - 10;
michael@0 457 engine.lastSync = now;
michael@0 458 engine.lastModified = now + 1;
michael@0 459
michael@0 460 let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
michael@0 461 let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
michael@0 462 server.insertWBO(user, "rotary", wbo);
michael@0 463
michael@0 464 // Simulate a locally-deleted item.
michael@0 465 engine._store.items = {};
michael@0 466 engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
michael@0 467 do_check_false(engine._store.itemExists("DUPE_LOCAL"));
michael@0 468 do_check_false(engine._store.itemExists("DUPE_INCOMING"));
michael@0 469 do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
michael@0 470
michael@0 471 engine._sync();
michael@0 472
michael@0 473 // After the sync, the server's payload for the original ID should be marked
michael@0 474 // as deleted.
michael@0 475 do_check_empty(engine._store.items);
michael@0 476 let collection = server.getCollection(user, "rotary");
michael@0 477 do_check_eq(1, collection.count());
michael@0 478 wbo = collection.wbo("DUPE_INCOMING");
michael@0 479 do_check_neq(null, wbo);
michael@0 480 let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
michael@0 481 do_check_true(payload.deleted);
michael@0 482
michael@0 483 cleanAndGo(server);
michael@0 484 });
michael@0 485
michael@0 486 add_test(function test_processIncoming_reconcile_locally_deleted_dupe_old() {
michael@0 487 _("Ensure locally deleted duplicate record older than incoming is restored.");
michael@0 488
michael@0 489 // This is similar to the above test except it tests the condition where the
michael@0 490 // incoming record is newer than the local deletion, therefore overriding it.
michael@0 491
michael@0 492 let [engine, server, user] = createServerAndConfigureClient();
michael@0 493
michael@0 494 let now = Date.now() / 1000 - 10;
michael@0 495 engine.lastSync = now;
michael@0 496 engine.lastModified = now + 1;
michael@0 497
michael@0 498 let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
michael@0 499 let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
michael@0 500 server.insertWBO(user, "rotary", wbo);
michael@0 501
michael@0 502 // Simulate a locally-deleted item.
michael@0 503 engine._store.items = {};
michael@0 504 engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
michael@0 505 do_check_false(engine._store.itemExists("DUPE_LOCAL"));
michael@0 506 do_check_false(engine._store.itemExists("DUPE_INCOMING"));
michael@0 507 do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
michael@0 508
michael@0 509 engine._sync();
michael@0 510
michael@0 511 // Since the remote change is newer, the incoming item should exist locally.
michael@0 512 do_check_attribute_count(engine._store.items, 1);
michael@0 513 do_check_true("DUPE_INCOMING" in engine._store.items);
michael@0 514 do_check_eq("incoming", engine._store.items.DUPE_INCOMING);
michael@0 515
michael@0 516 let collection = server.getCollection(user, "rotary");
michael@0 517 do_check_eq(1, collection.count());
michael@0 518 wbo = collection.wbo("DUPE_INCOMING");
michael@0 519 let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
michael@0 520 do_check_eq("incoming", payload.denomination);
michael@0 521
michael@0 522 cleanAndGo(server);
michael@0 523 });
michael@0 524
michael@0 525 add_test(function test_processIncoming_reconcile_changed_dupe() {
michael@0 526 _("Ensure that locally changed duplicate record is handled properly.");
michael@0 527
michael@0 528 let [engine, server, user] = createServerAndConfigureClient();
michael@0 529
michael@0 530 let now = Date.now() / 1000 - 10;
michael@0 531 engine.lastSync = now;
michael@0 532 engine.lastModified = now + 1;
michael@0 533
michael@0 534 // The local record is newer than the incoming one, so it should be retained.
michael@0 535 let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
michael@0 536 let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
michael@0 537 server.insertWBO(user, "rotary", wbo);
michael@0 538
michael@0 539 engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
michael@0 540 engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
michael@0 541 do_check_true(engine._store.itemExists("DUPE_LOCAL"));
michael@0 542 do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
michael@0 543
michael@0 544 engine._sync();
michael@0 545
michael@0 546 // The ID should have been changed to incoming.
michael@0 547 do_check_attribute_count(engine._store.items, 1);
michael@0 548 do_check_true("DUPE_INCOMING" in engine._store.items);
michael@0 549
michael@0 550 // On the server, the local ID should be deleted and the incoming ID should
michael@0 551 // have its payload set to what was in the local record.
michael@0 552 let collection = server.getCollection(user, "rotary");
michael@0 553 do_check_eq(1, collection.count());
michael@0 554 wbo = collection.wbo("DUPE_INCOMING");
michael@0 555 do_check_neq(undefined, wbo);
michael@0 556 let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
michael@0 557 do_check_eq("local", payload.denomination);
michael@0 558
michael@0 559 cleanAndGo(server);
michael@0 560 });
michael@0 561
michael@0 562 add_test(function test_processIncoming_reconcile_changed_dupe_new() {
michael@0 563 _("Ensure locally changed duplicate record older than incoming is ignored.");
michael@0 564
michael@0 565 // This test is similar to the above except the incoming record is younger
michael@0 566 // than the local record. The incoming record should be authoritative.
michael@0 567 let [engine, server, user] = createServerAndConfigureClient();
michael@0 568
michael@0 569 let now = Date.now() / 1000 - 10;
michael@0 570 engine.lastSync = now;
michael@0 571 engine.lastModified = now + 1;
michael@0 572
michael@0 573 let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
michael@0 574 let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
michael@0 575 server.insertWBO(user, "rotary", wbo);
michael@0 576
michael@0 577 engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
michael@0 578 engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
michael@0 579 do_check_true(engine._store.itemExists("DUPE_LOCAL"));
michael@0 580 do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
michael@0 581
michael@0 582 engine._sync();
michael@0 583
michael@0 584 // The ID should have been changed to incoming.
michael@0 585 do_check_attribute_count(engine._store.items, 1);
michael@0 586 do_check_true("DUPE_INCOMING" in engine._store.items);
michael@0 587
michael@0 588 // On the server, the local ID should be deleted and the incoming ID should
michael@0 589 // have its payload retained.
michael@0 590 let collection = server.getCollection(user, "rotary");
michael@0 591 do_check_eq(1, collection.count());
michael@0 592 wbo = collection.wbo("DUPE_INCOMING");
michael@0 593 do_check_neq(undefined, wbo);
michael@0 594 let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
michael@0 595 do_check_eq("incoming", payload.denomination);
michael@0 596 cleanAndGo(server);
michael@0 597 });
michael@0 598
michael@0 599 add_test(function test_processIncoming_mobile_batchSize() {
michael@0 600 _("SyncEngine._processIncoming doesn't fetch everything at once on mobile clients");
michael@0 601
michael@0 602 Svc.Prefs.set("client.type", "mobile");
michael@0 603 Service.identity.username = "foo";
michael@0 604
michael@0 605 // A collection that logs each GET
michael@0 606 let collection = new ServerCollection();
michael@0 607 collection.get_log = [];
michael@0 608 collection._get = collection.get;
michael@0 609 collection.get = function (options) {
michael@0 610 this.get_log.push(options);
michael@0 611 return this._get(options);
michael@0 612 };
michael@0 613
michael@0 614 // Let's create some 234 server side records. They're all at least
michael@0 615 // 10 minutes old.
michael@0 616 for (let i = 0; i < 234; i++) {
michael@0 617 let id = 'record-no-' + i;
michael@0 618 let payload = encryptPayload({id: id, denomination: "Record No. " + i});
michael@0 619 let wbo = new ServerWBO(id, payload);
michael@0 620 wbo.modified = Date.now()/1000 - 60*(i+10);
michael@0 621 collection.insertWBO(wbo);
michael@0 622 }
michael@0 623
michael@0 624 let server = sync_httpd_setup({
michael@0 625 "/1.1/foo/storage/rotary": collection.handler()
michael@0 626 });
michael@0 627
michael@0 628 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 629
michael@0 630 let engine = makeRotaryEngine();
michael@0 631 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 632 new WBORecord(engine.metaURL));
michael@0 633 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 634 syncID: engine.syncID}};
michael@0 635
michael@0 636 try {
michael@0 637
michael@0 638 _("On a mobile client, we get new records from the server in batches of 50.");
michael@0 639 engine._syncStartup();
michael@0 640 engine._processIncoming();
michael@0 641 do_check_attribute_count(engine._store.items, 234);
michael@0 642 do_check_true('record-no-0' in engine._store.items);
michael@0 643 do_check_true('record-no-49' in engine._store.items);
michael@0 644 do_check_true('record-no-50' in engine._store.items);
michael@0 645 do_check_true('record-no-233' in engine._store.items);
michael@0 646
michael@0 647 // Verify that the right number of GET requests with the right
michael@0 648 // kind of parameters were made.
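// For 234 records and a batch size of 50 this works out to six GETs: one
// limited full fetch (the first 50 records), one fetch with neither 'full'
// nor 'limit' set (the listing of remaining GUIDs), and then full fetches
// by ID in batches of 50, with a final partial batch of 234 % 50 = 34.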
michael@0 649 do_check_eq(collection.get_log.length,
michael@0 650 Math.ceil(234 / MOBILE_BATCH_SIZE) + 1);
michael@0 651 do_check_eq(collection.get_log[0].full, 1);
michael@0 652 do_check_eq(collection.get_log[0].limit, MOBILE_BATCH_SIZE);
michael@0 653 do_check_eq(collection.get_log[1].full, undefined);
michael@0 654 do_check_eq(collection.get_log[1].limit, undefined);
michael@0 655 for (let i = 1; i <= Math.floor(234 / MOBILE_BATCH_SIZE); i++) {
michael@0 656 do_check_eq(collection.get_log[i+1].full, 1);
michael@0 657 do_check_eq(collection.get_log[i+1].limit, undefined);
michael@0 658 if (i < Math.floor(234 / MOBILE_BATCH_SIZE))
michael@0 659 do_check_eq(collection.get_log[i+1].ids.length, MOBILE_BATCH_SIZE);
michael@0 660 else
michael@0 661 do_check_eq(collection.get_log[i+1].ids.length, 234 % MOBILE_BATCH_SIZE);
michael@0 662 }
michael@0 663
michael@0 664 } finally {
michael@0 665 cleanAndGo(server);
michael@0 666 }
michael@0 667 });
michael@0 668
michael@0 669
michael@0 670 add_test(function test_processIncoming_store_toFetch() {
michael@0 671 _("If processIncoming fails in the middle of a batch on mobile, state is saved in toFetch and lastSync.");
michael@0 672 Service.identity.username = "foo";
michael@0 673 Svc.Prefs.set("client.type", "mobile");
michael@0 674
michael@0 675 // A collection that throws at the fourth get.
michael@0 676 let collection = new ServerCollection();
michael@0 677 collection._get_calls = 0;
michael@0 678 collection._get = collection.get;
michael@0 679 collection.get = function() {
michael@0 680 this._get_calls += 1;
michael@0 681 if (this._get_calls > 3) {
michael@0 682 throw "Abort on fourth call!";
michael@0 683 }
michael@0 684 return this._get.apply(this, arguments);
michael@0 685 };
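// Aborting the fourth GET simulates the download failing partway through:
// by that point the first two batches have been fetched and applied, so the
// third batch should end up in toFetch (asserted below).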
michael@0 686
michael@0 687 // Let's create three batches worth of server side records.
michael@0 688 for (var i = 0; i < MOBILE_BATCH_SIZE * 3; i++) {
michael@0 689 let id = 'record-no-' + i;
michael@0 690 let payload = encryptPayload({id: id, denomination: "Record No. " + id});
michael@0 691 let wbo = new ServerWBO(id, payload);
michael@0 692 wbo.modified = Date.now()/1000 + 60 * (i - MOBILE_BATCH_SIZE * 3);
michael@0 693 collection.insertWBO(wbo);
michael@0 694 }
michael@0 695
michael@0 696 let engine = makeRotaryEngine();
michael@0 697 engine.enabled = true;
michael@0 698
michael@0 699 let server = sync_httpd_setup({
michael@0 700 "/1.1/foo/storage/rotary": collection.handler()
michael@0 701 });
michael@0 702
michael@0 703 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 704
michael@0 705 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 706 new WBORecord(engine.metaURL));
michael@0 707 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 708 syncID: engine.syncID}};
michael@0 709 try {
michael@0 710
michael@0 711 // Confirm initial environment
michael@0 712 do_check_eq(engine.lastSync, 0);
michael@0 713 do_check_empty(engine._store.items);
michael@0 714
michael@0 715 let error;
michael@0 716 try {
michael@0 717 engine.sync();
michael@0 718 } catch (ex) {
michael@0 719 error = ex;
michael@0 720 }
michael@0 721 do_check_true(!!error);
michael@0 722
michael@0 723 // Only the first two batches have been applied.
michael@0 724 do_check_eq(Object.keys(engine._store.items).length,
michael@0 725 MOBILE_BATCH_SIZE * 2);
michael@0 726
michael@0 727 // The third batch is stuck in toFetch. lastSync has been moved forward to
michael@0 728 // the last successful item's timestamp.
michael@0 729 do_check_eq(engine.toFetch.length, MOBILE_BATCH_SIZE);
michael@0 730 do_check_eq(engine.lastSync, collection.wbo("record-no-99").modified);
michael@0 731
michael@0 732 } finally {
michael@0 733 cleanAndGo(server);
michael@0 734 }
michael@0 735 });
michael@0 736
michael@0 737
michael@0 738 add_test(function test_processIncoming_resume_toFetch() {
michael@0 739 _("toFetch and previousFailed items left over from previous syncs are fetched on the next sync, along with new items.");
michael@0 740 Service.identity.username = "foo";
michael@0 741
michael@0 742 const LASTSYNC = Date.now() / 1000;
michael@0 743
michael@0 744 // Server records that will be downloaded
michael@0 745 let collection = new ServerCollection();
michael@0 746 collection.insert('flying',
michael@0 747 encryptPayload({id: 'flying',
michael@0 748 denomination: "LNER Class A3 4472"}));
michael@0 749 collection.insert('scotsman',
michael@0 750 encryptPayload({id: 'scotsman',
michael@0 751 denomination: "Flying Scotsman"}));
michael@0 752 collection.insert('rekolok',
michael@0 753 encryptPayload({id: 'rekolok',
michael@0 754 denomination: "Rekonstruktionslokomotive"}));
michael@0 755 for (let i = 0; i < 3; i++) {
michael@0 756 let id = 'failed' + i;
michael@0 757 let payload = encryptPayload({id: id, denomination: "Record No. " + i});
michael@0 758 let wbo = new ServerWBO(id, payload);
michael@0 759 wbo.modified = LASTSYNC - 10;
michael@0 760 collection.insertWBO(wbo);
michael@0 761 }
michael@0 762
michael@0 763 collection.wbo("flying").modified =
michael@0 764 collection.wbo("scotsman").modified = LASTSYNC - 10;
michael@0 765 collection._wbos.rekolok.modified = LASTSYNC + 10;
michael@0 766
michael@0 767 // The WBOs above are older than lastSync, but because they're listed in
michael@0 767 // toFetch and previousFailed they should still be downloaded.
michael@0 768 let engine = makeRotaryEngine();
michael@0 769 engine.lastSync = LASTSYNC;
michael@0 770 engine.toFetch = ["flying", "scotsman"];
michael@0 771 engine.previousFailed = ["failed0", "failed1", "failed2"];
michael@0 772
michael@0 773 let server = sync_httpd_setup({
michael@0 774 "/1.1/foo/storage/rotary": collection.handler()
michael@0 775 });
michael@0 776
michael@0 777 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 778
michael@0 779 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 780 new WBORecord(engine.metaURL));
michael@0 781 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 782 syncID: engine.syncID}};
michael@0 783 try {
michael@0 784
michael@0 785 // Confirm initial environment
michael@0 786 do_check_eq(engine._store.items.flying, undefined);
michael@0 787 do_check_eq(engine._store.items.scotsman, undefined);
michael@0 788 do_check_eq(engine._store.items.rekolok, undefined);
michael@0 789
michael@0 790 engine._syncStartup();
michael@0 791 engine._processIncoming();
michael@0 792
michael@0 793 // Local records have been created from the server data.
michael@0 794 do_check_eq(engine._store.items.flying, "LNER Class A3 4472");
michael@0 795 do_check_eq(engine._store.items.scotsman, "Flying Scotsman");
michael@0 796 do_check_eq(engine._store.items.rekolok, "Rekonstruktionslokomotive");
michael@0 797 do_check_eq(engine._store.items.failed0, "Record No. 0");
michael@0 798 do_check_eq(engine._store.items.failed1, "Record No. 1");
michael@0 799 do_check_eq(engine._store.items.failed2, "Record No. 2");
michael@0 800 do_check_eq(engine.previousFailed.length, 0);
michael@0 801 } finally {
michael@0 802 cleanAndGo(server);
michael@0 803 }
michael@0 804 });
michael@0 805
michael@0 806
michael@0 807 add_test(function test_processIncoming_applyIncomingBatchSize_smaller() {
michael@0 808 _("Ensure that a number of incoming items less than applyIncomingBatchSize is still applied.");
michael@0 809 Service.identity.username = "foo";
michael@0 810
michael@0 811 // Engine that doesn't like the first and last record it's given.
michael@0 812 const APPLY_BATCH_SIZE = 10;
michael@0 813 let engine = makeRotaryEngine();
michael@0 814 engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
michael@0 815 engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
michael@0 816 engine._store.applyIncomingBatch = function (records) {
michael@0 817 let failed1 = records.shift();
michael@0 818 let failed2 = records.pop();
michael@0 819 this._applyIncomingBatch(records);
michael@0 820 return [failed1.id, failed2.id];
michael@0 821 };
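// With APPLY_BATCH_SIZE - 1 = 9 records (record-no-0 .. record-no-8) there is
// only a single batch, so the shifted and popped failures are expected to be
// record-no-0 and record-no-8, as asserted below.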
michael@0 822
michael@0 823 // Let's create less than a batch worth of server side records.
michael@0 824 let collection = new ServerCollection();
michael@0 825 for (let i = 0; i < APPLY_BATCH_SIZE - 1; i++) {
michael@0 826 let id = 'record-no-' + i;
michael@0 827 let payload = encryptPayload({id: id, denomination: "Record No. " + id});
michael@0 828 collection.insert(id, payload);
michael@0 829 }
michael@0 830
michael@0 831 let server = sync_httpd_setup({
michael@0 832 "/1.1/foo/storage/rotary": collection.handler()
michael@0 833 });
michael@0 834
michael@0 835 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 836
michael@0 837 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 838 new WBORecord(engine.metaURL));
michael@0 839 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 840 syncID: engine.syncID}};
michael@0 841 try {
michael@0 842
michael@0 843 // Confirm initial environment
michael@0 844 do_check_empty(engine._store.items);
michael@0 845
michael@0 846 engine._syncStartup();
michael@0 847 engine._processIncoming();
michael@0 848
michael@0 849 // Records have been applied and the expected failures have failed.
michael@0 850 do_check_attribute_count(engine._store.items, APPLY_BATCH_SIZE - 1 - 2);
michael@0 851 do_check_eq(engine.toFetch.length, 0);
michael@0 852 do_check_eq(engine.previousFailed.length, 2);
michael@0 853 do_check_eq(engine.previousFailed[0], "record-no-0");
michael@0 854 do_check_eq(engine.previousFailed[1], "record-no-8");
michael@0 855
michael@0 856 } finally {
michael@0 857 cleanAndGo(server);
michael@0 858 }
michael@0 859 });
michael@0 860
michael@0 861
michael@0 862 add_test(function test_processIncoming_applyIncomingBatchSize_multiple() {
michael@0 863 _("Ensure that incoming items are applied according to applyIncomingBatchSize.");
michael@0 864 Service.identity.username = "foo";
michael@0 865
michael@0 866 const APPLY_BATCH_SIZE = 10;
michael@0 867
michael@0 868 // Engine that applies records in batches.
michael@0 869 let engine = makeRotaryEngine();
michael@0 870 engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
michael@0 871 let batchCalls = 0;
michael@0 872 engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
michael@0 873 engine._store.applyIncomingBatch = function (records) {
michael@0 874 batchCalls += 1;
michael@0 875 do_check_eq(records.length, APPLY_BATCH_SIZE);
michael@0 876 this._applyIncomingBatch.apply(this, arguments);
michael@0 877 };
michael@0 878
michael@0 879 // Let's create three batches worth of server side records.
michael@0 880 let collection = new ServerCollection();
michael@0 881 for (let i = 0; i < APPLY_BATCH_SIZE * 3; i++) {
michael@0 882 let id = 'record-no-' + i;
michael@0 883 let payload = encryptPayload({id: id, denomination: "Record No. " + id});
michael@0 884 collection.insert(id, payload);
michael@0 885 }
michael@0 886
michael@0 887 let server = sync_httpd_setup({
michael@0 888 "/1.1/foo/storage/rotary": collection.handler()
michael@0 889 });
michael@0 890
michael@0 891 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 892
michael@0 893 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 894 new WBORecord(engine.metaURL));
michael@0 895 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 896 syncID: engine.syncID}};
michael@0 897 try {
michael@0 898
michael@0 899 // Confirm initial environment
michael@0 900 do_check_empty(engine._store.items);
michael@0 901
michael@0 902 engine._syncStartup();
michael@0 903 engine._processIncoming();
michael@0 904
michael@0 905 // Records have been applied in 3 batches.
michael@0 906 do_check_eq(batchCalls, 3);
michael@0 907 do_check_attribute_count(engine._store.items, APPLY_BATCH_SIZE * 3);
michael@0 908
michael@0 909 } finally {
michael@0 910 cleanAndGo(server);
michael@0 911 }
michael@0 912 });
michael@0 913
michael@0 914
michael@0 915 add_test(function test_processIncoming_notify_count() {
michael@0 916 _("Ensure that failed records are reported only once.");
michael@0 917 Service.identity.username = "foo";
michael@0 918
michael@0 919 const APPLY_BATCH_SIZE = 5;
michael@0 920 const NUMBER_OF_RECORDS = 15;
michael@0 921
michael@0 922 // Engine that fails the first record.
michael@0 923 let engine = makeRotaryEngine();
michael@0 924 engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
michael@0 925 engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
michael@0 926 engine._store.applyIncomingBatch = function (records) {
michael@0 927 engine._store._applyIncomingBatch(records.slice(1));
michael@0 928 return [records[0].id];
michael@0 929 };
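// 15 records applied in batches of 5 means the first sync drops the first
// record of each batch (record-no-0, -5 and -10). The second sync refetches
// those three, and only record-no-0 (again the first record handed to
// applyIncomingBatch) fails a second time. The observer counts asserted below
// reflect exactly that.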
michael@0 930
michael@0 931 // Create a batch of server side records.
michael@0 932 let collection = new ServerCollection();
michael@0 933 for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
michael@0 934 let id = 'record-no-' + i;
michael@0 935 let payload = encryptPayload({id: id, denomination: "Record No. " + id});
michael@0 936 collection.insert(id, payload);
michael@0 937 }
michael@0 938
michael@0 939 let server = sync_httpd_setup({
michael@0 940 "/1.1/foo/storage/rotary": collection.handler()
michael@0 941 });
michael@0 942
michael@0 943 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 944
michael@0 945 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 946 new WBORecord(engine.metaURL));
michael@0 947 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 948 syncID: engine.syncID}};
michael@0 949 try {
michael@0 950 // Confirm initial environment.
michael@0 951 do_check_eq(engine.lastSync, 0);
michael@0 952 do_check_eq(engine.toFetch.length, 0);
michael@0 953 do_check_eq(engine.previousFailed.length, 0);
michael@0 954 do_check_empty(engine._store.items);
michael@0 955
michael@0 956 let called = 0;
michael@0 957 let counts;
michael@0 958 function onApplied(count) {
michael@0 959 _("Called with " + JSON.stringify(counts));
michael@0 960 counts = count;
michael@0 961 called++;
michael@0 962 }
michael@0 963 Svc.Obs.add("weave:engine:sync:applied", onApplied);
michael@0 964
michael@0 965 // Do sync.
michael@0 966 engine._syncStartup();
michael@0 967 engine._processIncoming();
michael@0 968
michael@0 969 // Confirm failures.
michael@0 970 do_check_attribute_count(engine._store.items, 12);
michael@0 971 do_check_eq(engine.previousFailed.length, 3);
michael@0 972 do_check_eq(engine.previousFailed[0], "record-no-0");
michael@0 973 do_check_eq(engine.previousFailed[1], "record-no-5");
michael@0 974 do_check_eq(engine.previousFailed[2], "record-no-10");
michael@0 975
michael@0 976 // There are newly failed records and they are reported.
michael@0 977 do_check_eq(called, 1);
michael@0 978 do_check_eq(counts.failed, 3);
michael@0 979 do_check_eq(counts.applied, 15);
michael@0 980 do_check_eq(counts.newFailed, 3);
michael@0 981 do_check_eq(counts.succeeded, 12);
michael@0 982
michael@0 983 // Sync again; one of the previously failed items fails again, the rest succeed.
michael@0 984 engine._processIncoming();
michael@0 985
michael@0 986 // Confirming removed failures.
michael@0 987 do_check_attribute_count(engine._store.items, 14);
michael@0 988 do_check_eq(engine.previousFailed.length, 1);
michael@0 989 do_check_eq(engine.previousFailed[0], "record-no-0");
michael@0 990
michael@0 991 do_check_eq(called, 2);
michael@0 992 do_check_eq(counts.failed, 1);
michael@0 993 do_check_eq(counts.applied, 3);
michael@0 994 do_check_eq(counts.newFailed, 0);
michael@0 995 do_check_eq(counts.succeeded, 2);
michael@0 996
michael@0 997 Svc.Obs.remove("weave:engine:sync:applied", onApplied);
michael@0 998 } finally {
michael@0 999 cleanAndGo(server);
michael@0 1000 }
michael@0 1001 });
michael@0 1002
michael@0 1003
michael@0 1004 add_test(function test_processIncoming_previousFailed() {
michael@0 1005 _("Ensure that failed records are retried.");
michael@0 1006 Service.identity.username = "foo";
michael@0 1007 Svc.Prefs.set("client.type", "mobile");
michael@0 1008
michael@0 1009 const APPLY_BATCH_SIZE = 4;
michael@0 1010 const NUMBER_OF_RECORDS = 14;
michael@0 1011
michael@0 1012 // Engine that fails the first 2 records.
michael@0 1013 let engine = makeRotaryEngine();
michael@0 1014 engine.mobileGUIDFetchBatchSize = engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
michael@0 1015 engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
michael@0 1016 engine._store.applyIncomingBatch = function (records) {
michael@0 1017 engine._store._applyIncomingBatch(records.slice(2));
michael@0 1018 return [records[0].id, records[1].id];
michael@0 1019 };
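// Every batch of 4 drops its first two records, so the first sync over 14
// records should leave 8 failures (record-no-0/1/4/5/8/9/12/13). Refetching
// them in GUID batches of 4 then fails only record-no-0/1/8/9, as asserted
// below.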
michael@0 1020
michael@0 1021 // Create a batch of server side records.
michael@0 1022 let collection = new ServerCollection();
michael@0 1023 for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
michael@0 1024 let id = 'record-no-' + i;
michael@0 1025 let payload = encryptPayload({id: id, denomination: "Record No. " + i});
michael@0 1026 collection.insert(id, payload);
michael@0 1027 }
michael@0 1028
michael@0 1029 let server = sync_httpd_setup({
michael@0 1030 "/1.1/foo/storage/rotary": collection.handler()
michael@0 1031 });
michael@0 1032
michael@0 1033 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1034
michael@0 1035 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 1036 new WBORecord(engine.metaURL));
michael@0 1037 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 1038 syncID: engine.syncID}};
michael@0 1039 try {
michael@0 1040 // Confirm initial environment.
michael@0 1041 do_check_eq(engine.lastSync, 0);
michael@0 1042 do_check_eq(engine.toFetch.length, 0);
michael@0 1043 do_check_eq(engine.previousFailed.length, 0);
michael@0 1044 do_check_empty(engine._store.items);
michael@0 1045
michael@0 1046 // Seed previousFailed with a few GUIDs; the sync below should replace them.
michael@0 1047 let previousFailed = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
michael@0 1048 engine.previousFailed = previousFailed;
michael@0 1049 do_check_eq(engine.previousFailed, previousFailed);
michael@0 1050
michael@0 1051 // Do sync.
michael@0 1052 engine._syncStartup();
michael@0 1053 engine._processIncoming();
michael@0 1054
michael@0 1055 // Expected result: 4 sync batches with 2 failures each => 8 failures
michael@0 1056 do_check_attribute_count(engine._store.items, 6);
michael@0 1057 do_check_eq(engine.previousFailed.length, 8);
michael@0 1058 do_check_eq(engine.previousFailed[0], "record-no-0");
michael@0 1059 do_check_eq(engine.previousFailed[1], "record-no-1");
michael@0 1060 do_check_eq(engine.previousFailed[2], "record-no-4");
michael@0 1061 do_check_eq(engine.previousFailed[3], "record-no-5");
michael@0 1062 do_check_eq(engine.previousFailed[4], "record-no-8");
michael@0 1063 do_check_eq(engine.previousFailed[5], "record-no-9");
michael@0 1064 do_check_eq(engine.previousFailed[6], "record-no-12");
michael@0 1065 do_check_eq(engine.previousFailed[7], "record-no-13");
michael@0 1066
michael@0 1067 // Sync again; records 0, 1, 8 and 9 fail a second time.
michael@0 1068 engine._processIncoming();
michael@0 1069
michael@0 1070 // A second sync with the same failed items should not add the same items again.
michael@0 1071 // Items that did not fail a second time should no longer be in previousFailed.
michael@0 1072 do_check_attribute_count(engine._store.items, 10);
michael@0 1073 do_check_eq(engine.previousFailed.length, 4);
michael@0 1074 do_check_eq(engine.previousFailed[0], "record-no-0");
michael@0 1075 do_check_eq(engine.previousFailed[1], "record-no-1");
michael@0 1076 do_check_eq(engine.previousFailed[2], "record-no-8");
michael@0 1077 do_check_eq(engine.previousFailed[3], "record-no-9");
michael@0 1078
michael@0 1079 // Refetched items that didn't fail the second time are in engine._store.items.
michael@0 1080 do_check_eq(engine._store.items['record-no-4'], "Record No. 4");
michael@0 1081 do_check_eq(engine._store.items['record-no-5'], "Record No. 5");
michael@0 1082 do_check_eq(engine._store.items['record-no-12'], "Record No. 12");
michael@0 1083 do_check_eq(engine._store.items['record-no-13'], "Record No. 13");
michael@0 1084 } finally {
michael@0 1085 cleanAndGo(server);
michael@0 1086 }
michael@0 1087 });
michael@0 1088
michael@0 1089
michael@0 1090 add_test(function test_processIncoming_failed_records() {
michael@0 1091 _("Ensure that failed records from _reconcile and applyIncomingBatch are refetched.");
michael@0 1092 Service.identity.username = "foo";
michael@0 1093
michael@0 1094 // Let's create three and a bit batches worth of server side records.
michael@0 1095 let collection = new ServerCollection();
michael@0 1096 const NUMBER_OF_RECORDS = MOBILE_BATCH_SIZE * 3 + 5;
michael@0 1097 for (let i = 0; i < NUMBER_OF_RECORDS; i++) {
michael@0 1098 let id = 'record-no-' + i;
michael@0 1099 let payload = encryptPayload({id: id, denomination: "Record No. " + id});
michael@0 1100 let wbo = new ServerWBO(id, payload);
michael@0 1101 wbo.modified = Date.now()/1000 + 60 * (i - MOBILE_BATCH_SIZE * 3);
michael@0 1102 collection.insertWBO(wbo);
michael@0 1103 }
michael@0 1104
michael@0 1105 // Engine that batches but likes to throw on a couple of records,
michael@0 1106 // two in each batch: the even ones fail in reconcile, the odd ones
michael@0 1107 // in applyIncoming.
michael@0 1108 const BOGUS_RECORDS = ["record-no-" + 42,
michael@0 1109 "record-no-" + 23,
michael@0 1110 "record-no-" + (42 + MOBILE_BATCH_SIZE),
michael@0 1111 "record-no-" + (23 + MOBILE_BATCH_SIZE),
michael@0 1112 "record-no-" + (42 + MOBILE_BATCH_SIZE * 2),
michael@0 1113 "record-no-" + (23 + MOBILE_BATCH_SIZE * 2),
michael@0 1114 "record-no-" + (2 + MOBILE_BATCH_SIZE * 3),
michael@0 1115 "record-no-" + (1 + MOBILE_BATCH_SIZE * 3)];
michael@0 1116 let engine = makeRotaryEngine();
michael@0 1117 engine.applyIncomingBatchSize = MOBILE_BATCH_SIZE;
michael@0 1118
michael@0 1119 engine.__reconcile = engine._reconcile;
michael@0 1120 engine._reconcile = function _reconcile(record) {
michael@0 1121 if (BOGUS_RECORDS.indexOf(record.id) % 2 == 0) {
michael@0 1122 throw "I don't like this record! Baaaaaah!";
michael@0 1123 }
michael@0 1124 return this.__reconcile.apply(this, arguments);
michael@0 1125 };
michael@0 1126 engine._store._applyIncoming = engine._store.applyIncoming;
michael@0 1127 engine._store.applyIncoming = function (record) {
michael@0 1128 if (BOGUS_RECORDS.indexOf(record.id) % 2 == 1) {
michael@0 1129 throw "I don't like this record! Baaaaaah!";
michael@0 1130 }
michael@0 1131 return this._applyIncoming.apply(this, arguments);
michael@0 1132 };
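// BOGUS_RECORDS lists two bad IDs in each of the four download batches; the
// even positions throw in _reconcile, the odd positions in applyIncoming, so
// all eight should end up in previousFailed (checked below).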
michael@0 1133
michael@0 1134 // Keep track of requests made of a collection.
michael@0 1135 let count = 0;
michael@0 1136 let uris = [];
michael@0 1137 function recording_handler(collection) {
michael@0 1138 let h = collection.handler();
michael@0 1139 return function(req, res) {
michael@0 1140 ++count;
michael@0 1141 uris.push(req.path + "?" + req.queryString);
michael@0 1142 return h(req, res);
michael@0 1143 };
michael@0 1144 }
michael@0 1145 let server = sync_httpd_setup({
michael@0 1146 "/1.1/foo/storage/rotary": recording_handler(collection)
michael@0 1147 });
michael@0 1148
michael@0 1149 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1150
michael@0 1151 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 1152 new WBORecord(engine.metaURL));
michael@0 1153 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 1154 syncID: engine.syncID}};
michael@0 1155
michael@0 1156 try {
michael@0 1157
michael@0 1158 // Confirm initial environment
michael@0 1159 do_check_eq(engine.lastSync, 0);
michael@0 1160 do_check_eq(engine.toFetch.length, 0);
michael@0 1161 do_check_eq(engine.previousFailed.length, 0);
michael@0 1162 do_check_empty(engine._store.items);
michael@0 1163
michael@0 1164 let observerSubject;
michael@0 1165 let observerData;
michael@0 1166 Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
michael@0 1167 Svc.Obs.remove("weave:engine:sync:applied", onApplied);
michael@0 1168 observerSubject = subject;
michael@0 1169 observerData = data;
michael@0 1170 });
michael@0 1171
michael@0 1172 engine._syncStartup();
michael@0 1173 engine._processIncoming();
michael@0 1174
michael@0 1175 // Ensure that all records but the bogus 4 have been applied.
michael@0 1176 do_check_attribute_count(engine._store.items,
michael@0 1177 NUMBER_OF_RECORDS - BOGUS_RECORDS.length);
michael@0 1178
michael@0 1179 // Ensure that the bogus records will be fetched again on the next sync.
michael@0 1180 do_check_eq(engine.previousFailed.length, BOGUS_RECORDS.length);
michael@0 1181 engine.previousFailed.sort();
michael@0 1182 BOGUS_RECORDS.sort();
michael@0 1183 for (let i = 0; i < engine.previousFailed.length; i++) {
michael@0 1184 do_check_eq(engine.previousFailed[i], BOGUS_RECORDS[i]);
michael@0 1185 }
michael@0 1186
michael@0 1187 // Ensure the observer was notified
michael@0 1188 do_check_eq(observerData, engine.name);
michael@0 1189 do_check_eq(observerSubject.failed, BOGUS_RECORDS.length);
michael@0 1190 do_check_eq(observerSubject.newFailed, BOGUS_RECORDS.length);
michael@0 1191
michael@0 1192 // Testing batching of failed item fetches.
michael@0 1193 // Try to sync again. Ensure that we split the request into chunks to avoid
michael@0 1194 // URI length limitations.
michael@0 1195 function batchDownload(batchSize) {
michael@0 1196 count = 0;
michael@0 1197 uris = [];
michael@0 1198 engine.guidFetchBatchSize = batchSize;
michael@0 1199 engine._processIncoming();
michael@0 1200 _("Tried again. Requests: " + count + "; URIs: " + JSON.stringify(uris));
michael@0 1201 return count;
michael@0 1202 }
michael@0 1203
michael@0 1204 // There are 8 bad records, so this needs 3 fetches.
michael@0 1205 _("Test batching with ID batch size 3, normal mobile batch size.");
michael@0 1206 do_check_eq(batchDownload(3), 3);
michael@0 1207
michael@0 1208 // Now see with a more realistic limit.
michael@0 1209 _("Test batching with sufficient ID batch size.");
michael@0 1210 do_check_eq(batchDownload(BOGUS_RECORDS.length), 1);
michael@0 1211
michael@0 1212 // If we're on mobile, the smaller mobileGUIDFetchBatchSize is used instead.
michael@0 1213 _("Test batching with tiny mobile batch size.");
michael@0 1214 Svc.Prefs.set("client.type", "mobile");
michael@0 1215 engine.mobileGUIDFetchBatchSize = 2;
michael@0 1216 do_check_eq(batchDownload(BOGUS_RECORDS.length), 4);
michael@0 1217
michael@0 1218 } finally {
michael@0 1219 cleanAndGo(server);
michael@0 1220 }
michael@0 1221 });
michael@0 1222
michael@0 1223
michael@0 1224 add_test(function test_processIncoming_decrypt_failed() {
michael@0 1225 _("Ensure that records failing to decrypt are either replaced or refetched.");
michael@0 1226
michael@0 1227 Service.identity.username = "foo";
michael@0 1228
michael@0 1229 // Some good and some bogus records. Two don't contain valid JSON,
michael@0 1230 // and two more will throw during decrypt.
michael@0 1231 let collection = new ServerCollection();
michael@0 1232 collection._wbos.flying = new ServerWBO(
michael@0 1233 'flying', encryptPayload({id: 'flying',
michael@0 1234 denomination: "LNER Class A3 4472"}));
michael@0 1235 collection._wbos.nojson = new ServerWBO("nojson", "This is invalid JSON");
michael@0 1236 collection._wbos.nojson2 = new ServerWBO("nojson2", "This is invalid JSON");
michael@0 1237 collection._wbos.scotsman = new ServerWBO(
michael@0 1238 'scotsman', encryptPayload({id: 'scotsman',
michael@0 1239 denomination: "Flying Scotsman"}));
michael@0 1240 collection._wbos.nodecrypt = new ServerWBO("nodecrypt", "Decrypt this!");
michael@0 1241 collection._wbos.nodecrypt2 = new ServerWBO("nodecrypt2", "Decrypt this!");
michael@0 1242
michael@0 1243 // Patch the fake crypto service to throw on the "Decrypt this!" records above.
michael@0 1244 Svc.Crypto._decrypt = Svc.Crypto.decrypt;
michael@0 1245 Svc.Crypto.decrypt = function (ciphertext) {
michael@0 1246 if (ciphertext == "Decrypt this!") {
michael@0 1247 throw "Derp! Cipher finalized failed. Im ur crypto destroyin ur recordz.";
michael@0 1248 }
michael@0 1249 return this._decrypt.apply(this, arguments);
michael@0 1250 };
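// The override above stashes the real decrypt as _decrypt and delegates to it
// for everything except the "Decrypt this!" sentinel, so the two good records
// still decrypt while nodecrypt and nodecrypt2 throw.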
michael@0 1251
michael@0 1252 // Some broken records also exist locally.
michael@0 1253 let engine = makeRotaryEngine();
michael@0 1254 engine.enabled = true;
michael@0 1255 engine._store.items = {nojson: "Valid JSON",
michael@0 1256 nodecrypt: "Valid ciphertext"};
michael@0 1257
michael@0 1258 let server = sync_httpd_setup({
michael@0 1259 "/1.1/foo/storage/rotary": collection.handler()
michael@0 1260 });
michael@0 1261
michael@0 1262 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1263
michael@0 1264 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 1265 new WBORecord(engine.metaURL));
michael@0 1266 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 1267 syncID: engine.syncID}};
michael@0 1268 try {
michael@0 1269
michael@0 1270 // Confirm initial state
michael@0 1271 do_check_eq(engine.toFetch.length, 0);
michael@0 1272 do_check_eq(engine.previousFailed.length, 0);
michael@0 1273
michael@0 1274 let observerSubject;
michael@0 1275 let observerData;
michael@0 1276 Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
michael@0 1277 Svc.Obs.remove("weave:engine:sync:applied", onApplied);
michael@0 1278 observerSubject = subject;
michael@0 1279 observerData = data;
michael@0 1280 });
michael@0 1281
michael@0 1282 engine.lastSync = collection.wbo("nojson").modified - 1;
michael@0 1283 engine.sync();
michael@0 1284
michael@0 1285 do_check_eq(engine.previousFailed.length, 4);
michael@0 1286 do_check_eq(engine.previousFailed[0], "nojson");
michael@0 1287 do_check_eq(engine.previousFailed[1], "nojson2");
michael@0 1288 do_check_eq(engine.previousFailed[2], "nodecrypt");
michael@0 1289 do_check_eq(engine.previousFailed[3], "nodecrypt2");
michael@0 1290
michael@0 1291 // Ensure the observer was notified
michael@0 1292 do_check_eq(observerData, engine.name);
michael@0 1293 do_check_eq(observerSubject.applied, 2);
michael@0 1294 do_check_eq(observerSubject.failed, 4);
michael@0 1295
michael@0 1296 } finally {
michael@0 1297 cleanAndGo(server);
michael@0 1298 }
michael@0 1299 });
michael@0 1300
michael@0 1301
michael@0 1302 add_test(function test_uploadOutgoing_toEmptyServer() {
michael@0 1303 _("SyncEngine._uploadOutgoing uploads new records to server");
michael@0 1304
michael@0 1305 Service.identity.username = "foo";
michael@0 1306 let collection = new ServerCollection();
michael@0 1307 collection._wbos.flying = new ServerWBO('flying');
michael@0 1308 collection._wbos.scotsman = new ServerWBO('scotsman');
michael@0 1309
michael@0 1310 let server = sync_httpd_setup({
michael@0 1311 "/1.1/foo/storage/rotary": collection.handler(),
michael@0 1312 "/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
michael@0 1313 "/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
michael@0 1314 });
michael@0 1315
michael@0 1316 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1317 generateNewKeys(Service.collectionKeys);
michael@0 1318
michael@0 1319 let engine = makeRotaryEngine();
michael@0 1320 engine.lastSync = 123; // needs to be non-zero so that tracker is queried
michael@0 1321 engine._store.items = {flying: "LNER Class A3 4472",
michael@0 1322 scotsman: "Flying Scotsman"};
michael@0 1323 // Mark one of these records as changed
michael@0 1324 engine._tracker.addChangedID('scotsman', 0);
michael@0 1325
michael@0 1326 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 1327 new WBORecord(engine.metaURL));
michael@0 1328 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 1329 syncID: engine.syncID}};
michael@0 1330
michael@0 1331 try {
michael@0 1332
michael@0 1333 // Confirm initial environment
michael@0 1334 do_check_eq(engine.lastSyncLocal, 0);
michael@0 1335 do_check_eq(collection.payload("flying"), undefined);
michael@0 1336 do_check_eq(collection.payload("scotsman"), undefined);
michael@0 1337
michael@0 1338 engine._syncStartup();
michael@0 1339 engine._uploadOutgoing();
michael@0 1340
michael@0 1341 // Local timestamp has been set.
michael@0 1342 do_check_true(engine.lastSyncLocal > 0);
michael@0 1343
michael@0 1344 // Ensure the marked record ('scotsman') has been uploaded and is
michael@0 1345 // no longer marked.
michael@0 1346 do_check_eq(collection.payload("flying"), undefined);
michael@0 1347 do_check_true(!!collection.payload("scotsman"));
michael@0 1348 do_check_eq(JSON.parse(collection.wbo("scotsman").data.ciphertext).id,
michael@0 1349 "scotsman");
michael@0 1350 do_check_eq(engine._tracker.changedIDs["scotsman"], undefined);
michael@0 1351
michael@0 1352 // The 'flying' record wasn't marked so it wasn't uploaded
michael@0 1353 do_check_eq(collection.payload("flying"), undefined);
michael@0 1354
michael@0 1355 } finally {
michael@0 1356 cleanAndGo(server);
michael@0 1357 }
michael@0 1358 });
michael@0 1359
michael@0 1360
michael@0 1361 add_test(function test_uploadOutgoing_failed() {
michael@0 1362 _("SyncEngine._uploadOutgoing doesn't clear the tracker of objects that failed to upload.");
michael@0 1363
michael@0 1364 Service.identity.username = "foo";
michael@0 1365 let collection = new ServerCollection();
michael@0 1366 // We only define the "flying" WBO on the server, not the "scotsman"
michael@0 1367 // and "peppercorn" ones.
michael@0 1368 collection._wbos.flying = new ServerWBO('flying');
michael@0 1369
michael@0 1370 let server = sync_httpd_setup({
michael@0 1371 "/1.1/foo/storage/rotary": collection.handler()
michael@0 1372 });
michael@0 1373
michael@0 1374 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1375
michael@0 1376 let engine = makeRotaryEngine();
michael@0 1377 engine.lastSync = 123; // needs to be non-zero so that tracker is queried
michael@0 1378 engine._store.items = {flying: "LNER Class A3 4472",
michael@0 1379 scotsman: "Flying Scotsman",
michael@0 1380 peppercorn: "Peppercorn Class"};
michael@0 1381 // Mark these records as changed
michael@0 1382 const FLYING_CHANGED = 12345;
michael@0 1383 const SCOTSMAN_CHANGED = 23456;
michael@0 1384 const PEPPERCORN_CHANGED = 34567;
michael@0 1385 engine._tracker.addChangedID('flying', FLYING_CHANGED);
michael@0 1386 engine._tracker.addChangedID('scotsman', SCOTSMAN_CHANGED);
michael@0 1387 engine._tracker.addChangedID('peppercorn', PEPPERCORN_CHANGED);
michael@0 1388
michael@0 1389 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 1390 new WBORecord(engine.metaURL));
michael@0 1391 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 1392 syncID: engine.syncID}};
michael@0 1393
michael@0 1394 try {
michael@0 1395
michael@0 1396 // Confirm initial environment
michael@0 1397 do_check_eq(engine.lastSyncLocal, 0);
michael@0 1398 do_check_eq(collection.payload("flying"), undefined);
michael@0 1399 do_check_eq(engine._tracker.changedIDs['flying'], FLYING_CHANGED);
michael@0 1400 do_check_eq(engine._tracker.changedIDs['scotsman'], SCOTSMAN_CHANGED);
michael@0 1401 do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED);
michael@0 1402
michael@0 1403 engine.enabled = true;
michael@0 1404 engine.sync();
michael@0 1405
michael@0 1406 // Local timestamp has been set.
michael@0 1407 do_check_true(engine.lastSyncLocal > 0);
michael@0 1408
michael@0 1409 // Ensure the 'flying' record has been uploaded and is no longer marked.
michael@0 1410 do_check_true(!!collection.payload("flying"));
michael@0 1411 do_check_eq(engine._tracker.changedIDs['flying'], undefined);
michael@0 1412
michael@0 1413 // The 'scotsman' and 'peppercorn' records couldn't be uploaded so
michael@0 1414 // they weren't cleared from the tracker.
michael@0 1415 do_check_eq(engine._tracker.changedIDs['scotsman'], SCOTSMAN_CHANGED);
michael@0 1416 do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED);
michael@0 1417
michael@0 1418 } finally {
michael@0 1419 cleanAndGo(server);
michael@0 1420 }
michael@0 1421 });
michael@0 1422
michael@0 1423
michael@0 1424 add_test(function test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
michael@0 1425 _("SyncEngine._uploadOutgoing uploads in batches of MAX_UPLOAD_RECORDS");
michael@0 1426
michael@0 1427 Service.identity.username = "foo";
michael@0 1428 let collection = new ServerCollection();
michael@0 1429
michael@0 1430 // Let's count how many times the client posts to the server
michael@0 1431 var noOfUploads = 0;
michael@0 1432 collection.post = (function(orig) {
michael@0 1433 return function() {
michael@0 1434 noOfUploads++;
michael@0 1435 return orig.apply(this, arguments);
michael@0 1436 };
michael@0 1437 }(collection.post));
michael@0 1438
michael@0 1439 // Create a bunch of records (and server side handlers)
michael@0 1440 let engine = makeRotaryEngine();
michael@0 1441 for (var i = 0; i < 234; i++) {
michael@0 1442 let id = 'record-no-' + i;
michael@0 1443 engine._store.items[id] = "Record No. " + i;
michael@0 1444 engine._tracker.addChangedID(id, 0);
michael@0 1445 collection.insert(id);
michael@0 1446 }
michael@0 1447
michael@0 1448 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 1449 new WBORecord(engine.metaURL));
michael@0 1450 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 1451 syncID: engine.syncID}};
michael@0 1452
michael@0 1453 let server = sync_httpd_setup({
michael@0 1454 "/1.1/foo/storage/rotary": collection.handler()
michael@0 1455 });
michael@0 1456
michael@0 1457 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1458
michael@0 1459 try {
michael@0 1460
michael@0 1461 // Confirm initial environment.
michael@0 1462 do_check_eq(noOfUploads, 0);
michael@0 1463
michael@0 1464 engine._syncStartup();
michael@0 1465 engine._uploadOutgoing();
michael@0 1466
michael@0 1467 // Ensure all records have been uploaded.
michael@0 1468 for (i = 0; i < 234; i++) {
michael@0 1469 do_check_true(!!collection.payload('record-no-' + i));
michael@0 1470 }
michael@0 1471
michael@0 1472 // Ensure that the uploads were performed in batches of MAX_UPLOAD_RECORDS.
michael@0 1473 do_check_eq(noOfUploads, Math.ceil(234/MAX_UPLOAD_RECORDS));
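// For instance, assuming MAX_UPLOAD_RECORDS is 100, the 234 changed records
// above work out to ceil(234/100) = 3 POST requests.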
michael@0 1474
michael@0 1475 } finally {
michael@0 1476 cleanAndGo(server);
michael@0 1477 }
michael@0 1478 });
michael@0 1479
michael@0 1480
michael@0 1481 add_test(function test_syncFinish_noDelete() {
michael@0 1482 _("SyncEngine._syncFinish resets tracker's score");
michael@0 1483
michael@0 1484 let server = httpd_setup({});
michael@0 1485
michael@0 1486 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1487 let engine = makeRotaryEngine();
michael@0 1488 engine._delete = {}; // Nothing to delete
michael@0 1489 engine._tracker.score = 100;
michael@0 1490
michael@0 1491 // _syncFinish() will reset the engine's score.
michael@0 1492 engine._syncFinish();
michael@0 1493 do_check_eq(engine.score, 0);
michael@0 1494 server.stop(run_next_test);
michael@0 1495 });
michael@0 1496
michael@0 1497
michael@0 1498 add_test(function test_syncFinish_deleteByIds() {
michael@0 1499 _("SyncEngine._syncFinish deletes server records slated for deletion (list of record IDs).");
michael@0 1500
michael@0 1501 Service.identity.username = "foo";
michael@0 1502 let collection = new ServerCollection();
michael@0 1503 collection._wbos.flying = new ServerWBO(
michael@0 1504 'flying', encryptPayload({id: 'flying',
michael@0 1505 denomination: "LNER Class A3 4472"}));
michael@0 1506 collection._wbos.scotsman = new ServerWBO(
michael@0 1507 'scotsman', encryptPayload({id: 'scotsman',
michael@0 1508 denomination: "Flying Scotsman"}));
michael@0 1509 collection._wbos.rekolok = new ServerWBO(
michael@0 1510 'rekolok', encryptPayload({id: 'rekolok',
michael@0 1511 denomination: "Rekonstruktionslokomotive"}));
michael@0 1512
michael@0 1513 let server = httpd_setup({
michael@0 1514 "/1.1/foo/storage/rotary": collection.handler()
michael@0 1515 });
michael@0 1516 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1517
michael@0 1518 let engine = makeRotaryEngine();
michael@0 1519 try {
michael@0 1520 engine._delete = {ids: ['flying', 'rekolok']};
michael@0 1521 engine._syncFinish();
michael@0 1522
michael@0 1523 // The 'flying' and 'rekolok' records were deleted while the
michael@0 1524 // 'scotsman' one wasn't.
michael@0 1525 do_check_eq(collection.payload("flying"), undefined);
michael@0 1526 do_check_true(!!collection.payload("scotsman"));
michael@0 1527 do_check_eq(collection.payload("rekolok"), undefined);
michael@0 1528
michael@0 1529 // The deletion todo list has been reset.
michael@0 1530 do_check_eq(engine._delete.ids, undefined);
michael@0 1531
michael@0 1532 } finally {
michael@0 1533 cleanAndGo(server);
michael@0 1534 }
michael@0 1535 });
michael@0 1536
michael@0 1537
michael@0 1538 add_test(function test_syncFinish_deleteLotsInBatches() {
michael@0 1539 _("SyncEngine._syncFinish deletes server records in batches of 100 (list of record IDs).");
michael@0 1540
michael@0 1541 Service.identity.username = "foo";
michael@0 1542 let collection = new ServerCollection();
michael@0 1543
michael@0 1544 // Let's count how many times the client does a DELETE request to the server
michael@0 1545 var noOfUploads = 0;
michael@0 1546 collection.delete = (function(orig) {
michael@0 1547 return function() {
michael@0 1548 noOfUploads++;
michael@0 1549 return orig.apply(this, arguments);
michael@0 1550 };
michael@0 1551 }(collection.delete));
michael@0 1552
michael@0 1553 // Create a bunch of records on the server
michael@0 1554 let now = Date.now();
michael@0 1555 for (var i = 0; i < 234; i++) {
michael@0 1556 let id = 'record-no-' + i;
michael@0 1557 let payload = encryptPayload({id: id, denomination: "Record No. " + i});
michael@0 1558 let wbo = new ServerWBO(id, payload);
michael@0 1559 wbo.modified = now / 1000 - 60 * (i + 110);
michael@0 1560 collection.insertWBO(wbo);
michael@0 1561 }
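// Record no. i was last modified (i + 110) minutes ago, so record 0 is 110
// minutes old and record 90 is exactly 200 minutes old.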
michael@0 1562
michael@0 1563 let server = httpd_setup({
michael@0 1564 "/1.1/foo/storage/rotary": collection.handler()
michael@0 1565 });
michael@0 1566
michael@0 1567 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1568
michael@0 1569 let engine = makeRotaryEngine();
michael@0 1570 try {
michael@0 1571
michael@0 1572 // Confirm initial environment
michael@0 1573 do_check_eq(noOfUploads, 0);
michael@0 1574
michael@0 1575 // Declare what we want to have deleted: all records no. 100 and
michael@0 1576 // up, plus all records modified within the last 200.5 minutes
michael@0 1577 // (records 0 thru 90; record 90 is exactly 200 minutes old).
michael@0 1578 engine._delete = {ids: [],
michael@0 1579 newer: now / 1000 - 60 * 200.5};
michael@0 1580 for (i = 100; i < 234; i++) {
michael@0 1581 engine._delete.ids.push('record-no-' + i);
michael@0 1582 }
michael@0 1583
michael@0 1584 engine._syncFinish();
michael@0 1585
michael@0 1586 // Ensure that the appropriate server data has been wiped while
michael@0 1587 // preserving records 91 thru 99.
michael@0 1588 for (i = 0; i < 234; i++) {
michael@0 1589 let id = 'record-no-' + i;
michael@0 1590 if (i <= 90 || i >= 100) {
michael@0 1591 do_check_eq(collection.payload(id), undefined);
michael@0 1592 } else {
michael@0 1593 do_check_true(!!collection.payload(id));
michael@0 1594 }
michael@0 1595 }
michael@0 1596
michael@0 1597 // The deletion was done in batches: 2 DELETE requests for the 134 explicit IDs plus 1 for the 'newer' criterion.
michael@0 1598 do_check_eq(noOfUploads, 2 + 1);
michael@0 1599
michael@0 1600 // The deletion todo list has been reset.
michael@0 1601 do_check_eq(engine._delete.ids, undefined);
michael@0 1602
michael@0 1603 } finally {
michael@0 1604 cleanAndGo(server);
michael@0 1605 }
michael@0 1606 });
michael@0 1607
michael@0 1608
michael@0 1609 add_test(function test_sync_partialUpload() {
michael@0 1610 _("SyncEngine.sync() keeps changedIDs that couldn't be uploaded.");
michael@0 1611
michael@0 1612 Service.identity.username = "foo";
michael@0 1613
michael@0 1614 let collection = new ServerCollection();
michael@0 1615 let server = sync_httpd_setup({
michael@0 1616 "/1.1/foo/storage/rotary": collection.handler()
michael@0 1617 });
michael@0 1618 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1619 generateNewKeys(Service.collectionKeys);
michael@0 1620
michael@0 1621 let engine = makeRotaryEngine();
michael@0 1622 engine.lastSync = 123; // needs to be non-zero so that tracker is queried
michael@0 1623 engine.lastSyncLocal = 456;
michael@0 1624
michael@0 1625 // Let the third upload fail completely
michael@0 1626 var noOfUploads = 0;
michael@0 1627 collection.post = (function(orig) {
michael@0 1628 return function() {
michael@0 1629 if (noOfUploads == 2)
michael@0 1630 throw "FAIL!";
michael@0 1631 noOfUploads++;
michael@0 1632 return orig.apply(this, arguments);
michael@0 1633 };
michael@0 1634 }(collection.post));
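// With uploads batched at MAX_UPLOAD_RECORDS per POST (assumed to be 100
// here), the throw on the third POST means only the first two batches --
// records 0 through 199 -- are attempted; of those, 23 and 42 fail because
// they are never inserted into the server collection below.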
michael@0 1635
michael@0 1636 // Create a bunch of records (and server side handlers)
michael@0 1637 for (let i = 0; i < 234; i++) {
michael@0 1638 let id = 'record-no-' + i;
michael@0 1639 engine._store.items[id] = "Record No. " + i;
michael@0 1640 engine._tracker.addChangedID(id, i);
michael@0 1641 // Let two items in the first upload batch fail.
michael@0 1642 if ((i != 23) && (i != 42)) {
michael@0 1643 collection.insert(id);
michael@0 1644 }
michael@0 1645 }
michael@0 1646
michael@0 1647 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 1648 new WBORecord(engine.metaURL));
michael@0 1649 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 1650 syncID: engine.syncID}};
michael@0 1651
michael@0 1652 try {
michael@0 1653
michael@0 1654 engine.enabled = true;
michael@0 1655 let error;
michael@0 1656 try {
michael@0 1657 engine.sync();
michael@0 1658 } catch (ex) {
michael@0 1659 error = ex;
michael@0 1660 }
michael@0 1661 do_check_true(!!error);
michael@0 1662
michael@0 1663 // The timestamp has been updated.
michael@0 1664 do_check_true(engine.lastSyncLocal > 456);
michael@0 1665
michael@0 1666 for (let i = 0; i < 234; i++) {
michael@0 1667 let id = 'record-no-' + i;
michael@0 1668 // Ensure failed records are back in the tracker:
michael@0 1669 // * records no. 23 and 42 were rejected by the server,
michael@0 1670 // * records no. 200 and higher couldn't be uploaded because we failed
michael@0 1671 // hard on the 3rd upload.
michael@0 1672 if ((i == 23) || (i == 42) || (i >= 200))
michael@0 1673 do_check_eq(engine._tracker.changedIDs[id], i);
michael@0 1674 else
michael@0 1675 do_check_false(id in engine._tracker.changedIDs);
michael@0 1676 }
michael@0 1677
michael@0 1678 } finally {
michael@0 1679 cleanAndGo(server);
michael@0 1680 }
michael@0 1681 });
michael@0 1682
michael@0 1683 add_test(function test_canDecrypt_noCryptoKeys() {
michael@0 1684 _("SyncEngine.canDecrypt returns false if the engine fails to decrypt items on the server, e.g. due to a missing crypto key collection.");
michael@0 1685 Service.identity.username = "foo";
michael@0 1686
michael@0 1687 // Wipe collection keys so we can test the desired scenario.
michael@0 1688 Service.collectionKeys.clear();
michael@0 1689
michael@0 1690 let collection = new ServerCollection();
michael@0 1691 collection._wbos.flying = new ServerWBO(
michael@0 1692 'flying', encryptPayload({id: 'flying',
michael@0 1693 denomination: "LNER Class A3 4472"}));
michael@0 1694
michael@0 1695 let server = sync_httpd_setup({
michael@0 1696 "/1.1/foo/storage/rotary": collection.handler()
michael@0 1697 });
michael@0 1698
michael@0 1699 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1700 let engine = makeRotaryEngine();
michael@0 1701 try {
michael@0 1702
michael@0 1703 do_check_false(engine.canDecrypt());
michael@0 1704
michael@0 1705 } finally {
michael@0 1706 cleanAndGo(server);
michael@0 1707 }
michael@0 1708 });
michael@0 1709
michael@0 1710 add_test(function test_canDecrypt_true() {
michael@0 1711 _("SyncEngine.canDecrypt returns true if the engine can decrypt the items on the server.");
michael@0 1712 Service.identity.username = "foo";
michael@0 1713
michael@0 1714 generateNewKeys(Service.collectionKeys);
michael@0 1715
michael@0 1716 let collection = new ServerCollection();
michael@0 1717 collection._wbos.flying = new ServerWBO(
michael@0 1718 'flying', encryptPayload({id: 'flying',
michael@0 1719 denomination: "LNER Class A3 4472"}));
michael@0 1720
michael@0 1721 let server = sync_httpd_setup({
michael@0 1722 "/1.1/foo/storage/rotary": collection.handler()
michael@0 1723 });
michael@0 1724
michael@0 1725 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1726 let engine = makeRotaryEngine();
michael@0 1727 try {
michael@0 1728
michael@0 1729 do_check_true(engine.canDecrypt());
michael@0 1730
michael@0 1731 } finally {
michael@0 1732 cleanAndGo(server);
michael@0 1733 }
michael@0 1734
michael@0 1735 });
michael@0 1736
michael@0 1737 add_test(function test_syncapplied_observer() {
michael@0 1738 Service.identity.username = "foo";
michael@0 1739
michael@0 1740 const NUMBER_OF_RECORDS = 10;
michael@0 1741
michael@0 1742 let engine = makeRotaryEngine();
michael@0 1743
michael@0 1744 // Create a batch of server side records.
michael@0 1745 let collection = new ServerCollection();
michael@0 1746 for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
michael@0 1747 let id = 'record-no-' + i;
michael@0 1748 let payload = encryptPayload({id: id, denomination: "Record No. " + i});
michael@0 1749 collection.insert(id, payload);
michael@0 1750 }
michael@0 1751
michael@0 1752 let server = httpd_setup({
michael@0 1753 "/1.1/foo/storage/rotary": collection.handler()
michael@0 1754 });
michael@0 1755
michael@0 1756 let syncTesting = new SyncTestingInfrastructure(server);
michael@0 1757
michael@0 1758 let meta_global = Service.recordManager.set(engine.metaURL,
michael@0 1759 new WBORecord(engine.metaURL));
michael@0 1760 meta_global.payload.engines = {rotary: {version: engine.version,
michael@0 1761 syncID: engine.syncID}};
michael@0 1762
michael@0 1763 let numApplyCalls = 0;
michael@0 1764 let engine_name;
michael@0 1765 let count;
michael@0 1766 function onApplied(subject, data) {
michael@0 1767 numApplyCalls++;
michael@0 1768 engine_name = data;
michael@0 1769 count = subject;
michael@0 1770 }
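// onApplied captures what the "weave:engine:sync:applied" notification
// delivers: 'subject' carries the aggregate counts (e.g. applied) and 'data'
// is the engine name; both are verified after the sync below.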
michael@0 1771
michael@0 1772 Svc.Obs.add("weave:engine:sync:applied", onApplied);
michael@0 1773
michael@0 1774 try {
michael@0 1775 Service.scheduler.hasIncomingItems = false;
michael@0 1776
michael@0 1777 // Do sync.
michael@0 1778 engine._syncStartup();
michael@0 1779 engine._processIncoming();
michael@0 1780
michael@0 1781 do_check_attribute_count(engine._store.items, 10);
michael@0 1782
michael@0 1783 do_check_eq(numApplyCalls, 1);
michael@0 1784 do_check_eq(engine_name, "rotary");
michael@0 1785 do_check_eq(count.applied, 10);
michael@0 1786
michael@0 1787 do_check_true(Service.scheduler.hasIncomingItems);
michael@0 1788 } finally {
michael@0 1789 cleanAndGo(server);
michael@0 1790 Service.scheduler.hasIncomingItems = false;
michael@0 1791 Svc.Obs.remove("weave:engine:sync:applied", onApplied);
michael@0 1792 }
michael@0 1793 });
