/* Any copyright is dedicated to the Public Domain.
 * http://creativecommons.org/publicdomain/zero/1.0/ */

Cu.import("resource://services-sync/constants.js");
Cu.import("resource://services-sync/engines.js");
Cu.import("resource://services-sync/policies.js");
Cu.import("resource://services-sync/record.js");
Cu.import("resource://services-sync/resource.js");
Cu.import("resource://services-sync/service.js");
Cu.import("resource://services-sync/util.js");
Cu.import("resource://testing-common/services/sync/rotaryengine.js");
Cu.import("resource://testing-common/services/sync/utils.js");

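// Each test builds its own RotaryEngine against the shared Service so that
// engine state doesn't leak between tests.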
function makeRotaryEngine() {
  return new RotaryEngine(Service);
}

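// Reset preferences and the record cache, then stop the test server and
// advance to the next test. Tests call this during teardown.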
function cleanAndGo(server) {
  Svc.Prefs.resetBranch("");
  Svc.Prefs.set("log.logger.engine.rotary", "Trace");
  Service.recordManager.clearCache();
  server.stop(run_next_test);
}

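// Point the Service at the given test server and set basic credentials
// (defaults to user "foo" with password "password").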
function configureService(server, username, password) {
  Service.clusterURL = server.baseURI;

  Service.identity.account = username || "foo";
  Service.identity.basicPassword = password || "password";
}

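// Start a SyncServer preloaded with a meta/global record for the rotary
// engine plus empty crypto and rotary collections for user "foo", point the
// Service at it, and return [engine, server, username] for use in a test.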
function createServerAndConfigureClient() {
  let engine = new RotaryEngine(Service);

  let contents = {
    meta: {global: {engines: {rotary: {version: engine.version,
                                       syncID: engine.syncID}}}},
    crypto: {},
    rotary: {}
  };

  const USER = "foo";
  let server = new SyncServer();
  server.registerUser(USER, "password");
  server.createContents(USER, contents);
  server.start();

  Service.serverURL = server.baseURI;
  Service.clusterURL = server.baseURI;
  Service.identity.username = USER;
  Service._updateCachedURLs();

  return [engine, server, USER];
}

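// Generate fresh collection keys and enable trace logging for the rotary
// engine before the individual add_test tests run.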
function run_test() {
  generateNewKeys(Service.collectionKeys);
  Svc.Prefs.set("log.logger.engine.rotary", "Trace");
  run_next_test();
}

/*
 * Tests
 *
 * SyncEngine._sync() is divided into four rather independent steps:
 *
 * - _syncStartup()
 * - _processIncoming()
 * - _uploadOutgoing()
 * - _syncFinish()
 *
 * In the spirit of unit testing, these are tested individually for
 * different scenarios below.
 */

add_test(function test_syncStartup_emptyOrOutdatedGlobalsResetsSync() {
  _("SyncEngine._syncStartup resets sync and wipes server data if there's no or an outdated global record");

  // Some server side data that's going to be wiped
  let collection = new ServerCollection();
  collection.insert('flying',
                    encryptPayload({id: 'flying',
                                    denomination: "LNER Class A3 4472"}));
  collection.insert('scotsman',
                    encryptPayload({id: 'scotsman',
                                    denomination: "Flying Scotsman"}));

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  let engine = makeRotaryEngine();
  engine._store.items = {rekolok: "Rekonstruktionslokomotive"};
  try {

    // Confirm initial environment
    do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);
    let metaGlobal = Service.recordManager.get(engine.metaURL);
    do_check_eq(metaGlobal.payload.engines, undefined);
    do_check_true(!!collection.payload("flying"));
    do_check_true(!!collection.payload("scotsman"));

    engine.lastSync = Date.now() / 1000;
    engine.lastSyncLocal = Date.now();

    // Trying to prompt a wipe -- we no longer track CryptoMeta per engine,
    // so it has nothing to check.
    engine._syncStartup();

    // The meta/global WBO has been filled with data about the engine
    let engineData = metaGlobal.payload.engines["rotary"];
    do_check_eq(engineData.version, engine.version);
    do_check_eq(engineData.syncID, engine.syncID);

    // Sync was reset and server data was wiped
    do_check_eq(engine.lastSync, 0);
    do_check_eq(collection.payload("flying"), undefined);
    do_check_eq(collection.payload("scotsman"), undefined);

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_syncStartup_serverHasNewerVersion() {
  _("SyncEngine._syncStartup throws VERSION_OUT_OF_DATE if the server has a newer engine version");

  let global = new ServerWBO('global', {engines: {rotary: {version: 23456}}});
  let server = httpd_setup({
    "/1.1/foo/storage/meta/global": global.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  let engine = makeRotaryEngine();
  try {

    // The server has a newer version of the data than our engine can
    // handle. That should give us an exception.
    let error;
    try {
      engine._syncStartup();
    } catch (ex) {
      error = ex;
    }
    do_check_eq(error.failureCode, VERSION_OUT_OF_DATE);

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_syncStartup_syncIDMismatchResetsClient() {
  _("SyncEngine._syncStartup resets sync if syncIDs don't match");

  let server = sync_httpd_setup({});
  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  // global record with a different syncID than our engine has
  let engine = makeRotaryEngine();
  let global = new ServerWBO('global',
                             {engines: {rotary: {version: engine.version,
                                                 syncID: 'foobar'}}});
  server.registerPathHandler("/1.1/foo/storage/meta/global", global.handler());

  try {

    // Confirm initial environment
    do_check_eq(engine.syncID, 'fake-guid-0');
    do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);

    engine.lastSync = Date.now() / 1000;
    engine.lastSyncLocal = Date.now();
    engine._syncStartup();

    // The engine has assumed the server's syncID
    do_check_eq(engine.syncID, 'foobar');

    // Sync was reset
    do_check_eq(engine.lastSync, 0);

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_emptyServer() {
  _("SyncEngine._processIncoming working with an empty server backend");

  let collection = new ServerCollection();
  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  let engine = makeRotaryEngine();
  try {

    // Merely ensure that this code path is run without any errors
    engine._processIncoming();
    do_check_eq(engine.lastSync, 0);

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_createFromServer() {
  _("SyncEngine._processIncoming creates new records from server data");

  // Some server records that will be downloaded
  let collection = new ServerCollection();
  collection.insert('flying',
                    encryptPayload({id: 'flying',
                                    denomination: "LNER Class A3 4472"}));
  collection.insert('scotsman',
                    encryptPayload({id: 'scotsman',
                                    denomination: "Flying Scotsman"}));

  // A pathological case involving a relative URI gone wrong.
  let pathologicalPayload = encryptPayload({id: '../pathological',
                                            denomination: "Pathological Case"});
  collection.insert('../pathological', pathologicalPayload);

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler(),
    "/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
    "/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  generateNewKeys(Service.collectionKeys);

  let engine = makeRotaryEngine();
  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    // Confirm initial environment
    do_check_eq(engine.lastSync, 0);
    do_check_eq(engine.lastModified, null);
    do_check_eq(engine._store.items.flying, undefined);
    do_check_eq(engine._store.items.scotsman, undefined);
    do_check_eq(engine._store.items['../pathological'], undefined);

    engine._syncStartup();
    engine._processIncoming();

    // Timestamps of last sync and last server modification are set.
    do_check_true(engine.lastSync > 0);
    do_check_true(engine.lastModified > 0);

    // Local records have been created from the server data.
    do_check_eq(engine._store.items.flying, "LNER Class A3 4472");
    do_check_eq(engine._store.items.scotsman, "Flying Scotsman");
    do_check_eq(engine._store.items['../pathological'], "Pathological Case");

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_reconcile() {
  _("SyncEngine._processIncoming updates local records");

  let collection = new ServerCollection();

  // This server record doesn't exist on the client yet,
  // so a local record will be created for it.
  collection.insert('newrecord',
                    encryptPayload({id: 'newrecord',
                                    denomination: "New stuff..."}));

  // This server record is newer than the corresponding client one,
  // so it'll update its data.
  collection.insert('newerserver',
                    encryptPayload({id: 'newerserver',
                                    denomination: "New data!"}));

  // This server record is 2 mins older than the client counterpart
  // but identical to it, so we're expecting the client record's
  // changedID to be reset.
  collection.insert('olderidentical',
                    encryptPayload({id: 'olderidentical',
                                    denomination: "Older but identical"}));
  collection._wbos.olderidentical.modified -= 120;

  // This item simply has different data than the corresponding client
  // record (which is unmodified), so it will update the client as well
  collection.insert('updateclient',
                    encryptPayload({id: 'updateclient',
                                    denomination: "Get this!"}));

  // This is a dupe of 'original'.
  collection.insert('duplication',
                    encryptPayload({id: 'duplication',
                                    denomination: "Original Entry"}));

  // This record is marked as deleted, so we're expecting the client
  // record to be removed.
  collection.insert('nukeme',
                    encryptPayload({id: 'nukeme',
                                    denomination: "Nuke me!",
                                    deleted: true}));

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  Service.identity.username = "foo";

  let engine = makeRotaryEngine();
  engine._store.items = {newerserver: "New data, but not as new as server!",
                         olderidentical: "Older but identical",
                         updateclient: "Got data?",
                         original: "Original Entry",
                         long_original: "Long Original Entry",
                         nukeme: "Nuke me!"};
  // Make this record 1 min old, thus older than the one on the server
  engine._tracker.addChangedID('newerserver', Date.now()/1000 - 60);
  // This record has been changed 2 mins later than the one on the server
  engine._tracker.addChangedID('olderidentical', Date.now()/1000);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    // Confirm initial environment
    do_check_eq(engine._store.items.newrecord, undefined);
    do_check_eq(engine._store.items.newerserver, "New data, but not as new as server!");
    do_check_eq(engine._store.items.olderidentical, "Older but identical");
    do_check_eq(engine._store.items.updateclient, "Got data?");
    do_check_eq(engine._store.items.nukeme, "Nuke me!");
    do_check_true(engine._tracker.changedIDs['olderidentical'] > 0);

    engine._syncStartup();
    engine._processIncoming();

    // Timestamps of last sync and last server modification are set.
    do_check_true(engine.lastSync > 0);
    do_check_true(engine.lastModified > 0);

    // The new record is created.
    do_check_eq(engine._store.items.newrecord, "New stuff...");

    // The 'newerserver' record is updated since the server data is newer.
    do_check_eq(engine._store.items.newerserver, "New data!");

    // The data for 'olderidentical' is identical on the server, so
    // it's no longer marked as changed.
    do_check_eq(engine._store.items.olderidentical, "Older but identical");
    do_check_eq(engine._tracker.changedIDs['olderidentical'], undefined);

    // Updated with server data.
    do_check_eq(engine._store.items.updateclient, "Get this!");

    // The incoming ID is preferred: the local 'original' entry is replaced
    // by the incoming 'duplication' record and scheduled for deletion upstream.
    do_check_eq(engine._store.items.original, undefined);
    do_check_eq(engine._store.items.duplication, "Original Entry");
    do_check_neq(engine._delete.ids.indexOf("original"), -1);

    // The 'nukeme' record marked as deleted is removed.
    do_check_eq(engine._store.items.nukeme, undefined);
  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_reconcile_local_deleted() {
  _("Ensure local, duplicate ID is deleted on server.");

  // When a duplicate is resolved, the local ID (which is never taken) should
  // be deleted on the server.
  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  record = encryptPayload({id: "DUPE_LOCAL", denomination: "local"});
  wbo = new ServerWBO("DUPE_LOCAL", record, now - 1);
  server.insertWBO(user, "rotary", wbo);

  engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
  do_check_true(engine._store.itemExists("DUPE_LOCAL"));
  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));

  engine._sync();

  do_check_attribute_count(engine._store.items, 1);
  do_check_true("DUPE_INCOMING" in engine._store.items);

  let collection = server.getCollection(user, "rotary");
  do_check_eq(1, collection.count());
  do_check_neq(undefined, collection.wbo("DUPE_INCOMING"));

  cleanAndGo(server);
});

add_test(function test_processIncoming_reconcile_equivalent() {
  _("Ensure proper handling of incoming records that match local.");

  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  let record = encryptPayload({id: "entry", denomination: "denomination"});
  let wbo = new ServerWBO("entry", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  engine._store.items = {entry: "denomination"};
  do_check_true(engine._store.itemExists("entry"));

  engine._sync();

  do_check_attribute_count(engine._store.items, 1);

  cleanAndGo(server);
});

add_test(function test_processIncoming_reconcile_locally_deleted_dupe_new() {
  _("Ensure locally deleted duplicate record newer than incoming is handled.");

  // This is a somewhat complicated test. It ensures that if a client receives
  // an incoming record that duplicates a locally deleted item (under a
  // different ID) and the local deletion is newer, the incoming record is
  // ignored and the deletion wins. This is a corner case for record handling,
  // but it needs to be supported.
  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  // Simulate a locally-deleted item.
  engine._store.items = {};
  engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
  do_check_false(engine._store.itemExists("DUPE_LOCAL"));
  do_check_false(engine._store.itemExists("DUPE_INCOMING"));
  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));

  engine._sync();

  // After the sync, the server's payload for the incoming ID should be marked
  // as deleted.
  do_check_empty(engine._store.items);
  let collection = server.getCollection(user, "rotary");
  do_check_eq(1, collection.count());
  wbo = collection.wbo("DUPE_INCOMING");
  do_check_neq(null, wbo);
  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
  do_check_true(payload.deleted);

  cleanAndGo(server);
});

add_test(function test_processIncoming_reconcile_locally_deleted_dupe_old() {
  _("Ensure locally deleted duplicate record older than incoming is restored.");

  // This is similar to the above test except it tests the condition where the
  // incoming record is newer than the local deletion, therefore overriding it.

  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  // Simulate a locally-deleted item.
  engine._store.items = {};
  engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
  do_check_false(engine._store.itemExists("DUPE_LOCAL"));
  do_check_false(engine._store.itemExists("DUPE_INCOMING"));
  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));

  engine._sync();

  // Since the remote change is newer, the incoming item should exist locally.
  do_check_attribute_count(engine._store.items, 1);
  do_check_true("DUPE_INCOMING" in engine._store.items);
  do_check_eq("incoming", engine._store.items.DUPE_INCOMING);

  let collection = server.getCollection(user, "rotary");
  do_check_eq(1, collection.count());
  wbo = collection.wbo("DUPE_INCOMING");
  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
  do_check_eq("incoming", payload.denomination);

  cleanAndGo(server);
});

add_test(function test_processIncoming_reconcile_changed_dupe() {
  _("Ensure that locally changed duplicate record is handled properly.");

  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  // The local record is newer than the incoming one, so it should be retained.
  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
  engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
  do_check_true(engine._store.itemExists("DUPE_LOCAL"));
  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));

  engine._sync();

  // The ID should have been changed to incoming.
  do_check_attribute_count(engine._store.items, 1);
  do_check_true("DUPE_INCOMING" in engine._store.items);

  // On the server, the local ID should be deleted and the incoming ID should
  // have its payload set to what was in the local record.
  let collection = server.getCollection(user, "rotary");
  do_check_eq(1, collection.count());
  wbo = collection.wbo("DUPE_INCOMING");
  do_check_neq(undefined, wbo);
  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
  do_check_eq("local", payload.denomination);

  cleanAndGo(server);
});

add_test(function test_processIncoming_reconcile_changed_dupe_new() {
  _("Ensure locally changed duplicate record older than incoming is ignored.");

  // This test is similar to the above except the incoming record is newer
  // than the local record. The incoming record should be authoritative.
  let [engine, server, user] = createServerAndConfigureClient();

  let now = Date.now() / 1000 - 10;
  engine.lastSync = now;
  engine.lastModified = now + 1;

  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
  server.insertWBO(user, "rotary", wbo);

  engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
  engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
  do_check_true(engine._store.itemExists("DUPE_LOCAL"));
  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));

  engine._sync();

  // The ID should have been changed to incoming.
  do_check_attribute_count(engine._store.items, 1);
  do_check_true("DUPE_INCOMING" in engine._store.items);

  // On the server, the local ID should be deleted and the incoming ID should
  // have its payload retained.
  let collection = server.getCollection(user, "rotary");
  do_check_eq(1, collection.count());
  wbo = collection.wbo("DUPE_INCOMING");
  do_check_neq(undefined, wbo);
  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
  do_check_eq("incoming", payload.denomination);
  cleanAndGo(server);
});

add_test(function test_processIncoming_mobile_batchSize() {
  _("SyncEngine._processIncoming doesn't fetch everything at once on mobile clients");

  Svc.Prefs.set("client.type", "mobile");
  Service.identity.username = "foo";

  // A collection that logs each GET
  let collection = new ServerCollection();
  collection.get_log = [];
  collection._get = collection.get;
  collection.get = function (options) {
    this.get_log.push(options);
    return this._get(options);
  };

  // Let's create some 234 server side records. They're all at least
  // 10 minutes old.
  for (let i = 0; i < 234; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + i});
    let wbo = new ServerWBO(id, payload);
    wbo.modified = Date.now()/1000 - 60*(i+10);
    collection.insertWBO(wbo);
  }

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let engine = makeRotaryEngine();
  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    _("On a mobile client, we get new records from the server in batches of 50.");
    engine._syncStartup();
    engine._processIncoming();
    do_check_attribute_count(engine._store.items, 234);
    do_check_true('record-no-0' in engine._store.items);
    do_check_true('record-no-49' in engine._store.items);
    do_check_true('record-no-50' in engine._store.items);
    do_check_true('record-no-233' in engine._store.items);

    // Verify that the right number of GET requests with the right
    // kind of parameters were made.
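    // Expected pattern: the first GET is a full fetch capped at
    // MOBILE_BATCH_SIZE, the second carries neither "full" nor "limit"
    // (it only asks for the remaining GUIDs), and each later GET retrieves
    // one batch of those IDs in full.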
    do_check_eq(collection.get_log.length,
                Math.ceil(234 / MOBILE_BATCH_SIZE) + 1);
    do_check_eq(collection.get_log[0].full, 1);
    do_check_eq(collection.get_log[0].limit, MOBILE_BATCH_SIZE);
    do_check_eq(collection.get_log[1].full, undefined);
    do_check_eq(collection.get_log[1].limit, undefined);
    for (let i = 1; i <= Math.floor(234 / MOBILE_BATCH_SIZE); i++) {
      do_check_eq(collection.get_log[i+1].full, 1);
      do_check_eq(collection.get_log[i+1].limit, undefined);
      if (i < Math.floor(234 / MOBILE_BATCH_SIZE))
        do_check_eq(collection.get_log[i+1].ids.length, MOBILE_BATCH_SIZE);
      else
        do_check_eq(collection.get_log[i+1].ids.length, 234 % MOBILE_BATCH_SIZE);
    }

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_store_toFetch() {
  _("If processIncoming fails in the middle of a batch on mobile, state is saved in toFetch and lastSync.");
  Service.identity.username = "foo";
  Svc.Prefs.set("client.type", "mobile");

  // A collection that throws at the fourth get.
  let collection = new ServerCollection();
  collection._get_calls = 0;
  collection._get = collection.get;
  collection.get = function() {
    this._get_calls += 1;
    if (this._get_calls > 3) {
      throw "Abort on fourth call!";
    }
    return this._get.apply(this, arguments);
  };

  // Let's create three batches worth of server side records.
  for (var i = 0; i < MOBILE_BATCH_SIZE * 3; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + id});
    let wbo = new ServerWBO(id, payload);
    wbo.modified = Date.now()/1000 + 60 * (i - MOBILE_BATCH_SIZE * 3);
    collection.insertWBO(wbo);
  }

  let engine = makeRotaryEngine();
  engine.enabled = true;

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {

    // Confirm initial environment
    do_check_eq(engine.lastSync, 0);
    do_check_empty(engine._store.items);

    let error;
    try {
      engine.sync();
    } catch (ex) {
      error = ex;
    }
    do_check_true(!!error);

    // Only the first two batches have been applied.
    do_check_eq(Object.keys(engine._store.items).length,
                MOBILE_BATCH_SIZE * 2);

    // The third batch is stuck in toFetch. lastSync has been moved forward to
    // the last successful item's timestamp.
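    // (With a mobile batch size of 50, the first two batches cover records
    // 0 through 99, so "record-no-99" is the last successfully applied item.)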
    do_check_eq(engine.toFetch.length, MOBILE_BATCH_SIZE);
    do_check_eq(engine.lastSync, collection.wbo("record-no-99").modified);

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_resume_toFetch() {
  _("toFetch and previousFailed items left over from previous syncs are fetched on the next sync, along with new items.");
  Service.identity.username = "foo";

  const LASTSYNC = Date.now() / 1000;

  // Server records that will be downloaded
  let collection = new ServerCollection();
  collection.insert('flying',
                    encryptPayload({id: 'flying',
                                    denomination: "LNER Class A3 4472"}));
  collection.insert('scotsman',
                    encryptPayload({id: 'scotsman',
                                    denomination: "Flying Scotsman"}));
  collection.insert('rekolok',
                    encryptPayload({id: 'rekolok',
                                    denomination: "Rekonstruktionslokomotive"}));
  for (let i = 0; i < 3; i++) {
    let id = 'failed' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + i});
    let wbo = new ServerWBO(id, payload);
    wbo.modified = LASTSYNC - 10;
    collection.insertWBO(wbo);
  }

  collection.wbo("flying").modified =
    collection.wbo("scotsman").modified = LASTSYNC - 10;
  collection._wbos.rekolok.modified = LASTSYNC + 10;

  // The engine's lastSync is 10 seconds ahead of flying, scotsman and the
  // failed records, but they are still downloaded because they are listed
  // in toFetch and previousFailed.
  let engine = makeRotaryEngine();
  engine.lastSync = LASTSYNC;
  engine.toFetch = ["flying", "scotsman"];
  engine.previousFailed = ["failed0", "failed1", "failed2"];

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {

    // Confirm initial environment
    do_check_eq(engine._store.items.flying, undefined);
    do_check_eq(engine._store.items.scotsman, undefined);
    do_check_eq(engine._store.items.rekolok, undefined);

    engine._syncStartup();
    engine._processIncoming();

    // Local records have been created from the server data.
    do_check_eq(engine._store.items.flying, "LNER Class A3 4472");
    do_check_eq(engine._store.items.scotsman, "Flying Scotsman");
    do_check_eq(engine._store.items.rekolok, "Rekonstruktionslokomotive");
    do_check_eq(engine._store.items.failed0, "Record No. 0");
    do_check_eq(engine._store.items.failed1, "Record No. 1");
    do_check_eq(engine._store.items.failed2, "Record No. 2");
    do_check_eq(engine.previousFailed.length, 0);
  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_applyIncomingBatchSize_smaller() {
  _("Ensure that a number of incoming items less than applyIncomingBatchSize is still applied.");
  Service.identity.username = "foo";

  // Engine that doesn't like the first and last record it's given.
  const APPLY_BATCH_SIZE = 10;
  let engine = makeRotaryEngine();
  engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
  engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
  engine._store.applyIncomingBatch = function (records) {
    let failed1 = records.shift();
    let failed2 = records.pop();
    this._applyIncomingBatch(records);
    return [failed1.id, failed2.id];
  };

  // Let's create less than a batch worth of server side records.
  let collection = new ServerCollection();
  for (let i = 0; i < APPLY_BATCH_SIZE - 1; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + id});
    collection.insert(id, payload);
  }

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {

    // Confirm initial environment
    do_check_empty(engine._store.items);

    engine._syncStartup();
    engine._processIncoming();

    // Records have been applied and the expected failures have failed.
    do_check_attribute_count(engine._store.items, APPLY_BATCH_SIZE - 1 - 2);
    do_check_eq(engine.toFetch.length, 0);
    do_check_eq(engine.previousFailed.length, 2);
    do_check_eq(engine.previousFailed[0], "record-no-0");
    do_check_eq(engine.previousFailed[1], "record-no-8");

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_applyIncomingBatchSize_multiple() {
  _("Ensure that incoming items are applied according to applyIncomingBatchSize.");
  Service.identity.username = "foo";

  const APPLY_BATCH_SIZE = 10;

  // Engine that applies records in batches.
  let engine = makeRotaryEngine();
  engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
  let batchCalls = 0;
  engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
  engine._store.applyIncomingBatch = function (records) {
    batchCalls += 1;
    do_check_eq(records.length, APPLY_BATCH_SIZE);
    this._applyIncomingBatch.apply(this, arguments);
  };

  // Let's create three batches worth of server side records.
  let collection = new ServerCollection();
  for (let i = 0; i < APPLY_BATCH_SIZE * 3; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + id});
    collection.insert(id, payload);
  }

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {

    // Confirm initial environment
    do_check_empty(engine._store.items);

    engine._syncStartup();
    engine._processIncoming();

    // Records have been applied in 3 batches.
    do_check_eq(batchCalls, 3);
    do_check_attribute_count(engine._store.items, APPLY_BATCH_SIZE * 3);

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_notify_count() {
  _("Ensure that failed records are reported only once.");
  Service.identity.username = "foo";

  const APPLY_BATCH_SIZE = 5;
  const NUMBER_OF_RECORDS = 15;

  // Engine that fails the first record of each batch.
  let engine = makeRotaryEngine();
  engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
  engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
  engine._store.applyIncomingBatch = function (records) {
    engine._store._applyIncomingBatch(records.slice(1));
    return [records[0].id];
  };

  // Create a batch of server side records.
  let collection = new ServerCollection();
  for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + id});
    collection.insert(id, payload);
  }

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {
    // Confirm initial environment.
    do_check_eq(engine.lastSync, 0);
    do_check_eq(engine.toFetch.length, 0);
    do_check_eq(engine.previousFailed.length, 0);
    do_check_empty(engine._store.items);

    let called = 0;
    let counts;
    function onApplied(count) {
      _("Called with " + JSON.stringify(count));
      counts = count;
      called++;
    }
    Svc.Obs.add("weave:engine:sync:applied", onApplied);

    // Do sync.
    engine._syncStartup();
    engine._processIncoming();

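    // With 15 records and a batch size of 5, applyIncomingBatch runs three
    // times and rejects the first record of each batch, so 12 records apply
    // and records 0, 5 and 10 fail.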
    // Confirm failures.
    do_check_attribute_count(engine._store.items, 12);
    do_check_eq(engine.previousFailed.length, 3);
    do_check_eq(engine.previousFailed[0], "record-no-0");
    do_check_eq(engine.previousFailed[1], "record-no-5");
    do_check_eq(engine.previousFailed[2], "record-no-10");

    // There are newly failed records and they are reported.
    do_check_eq(called, 1);
    do_check_eq(counts.failed, 3);
    do_check_eq(counts.applied, 15);
    do_check_eq(counts.newFailed, 3);
    do_check_eq(counts.succeeded, 12);

    // Sync again; 1 of the failed items is the same, the rest didn't fail.
    engine._processIncoming();

    // Confirming removed failures.
    do_check_attribute_count(engine._store.items, 14);
    do_check_eq(engine.previousFailed.length, 1);
    do_check_eq(engine.previousFailed[0], "record-no-0");

    do_check_eq(called, 2);
    do_check_eq(counts.failed, 1);
    do_check_eq(counts.applied, 3);
    do_check_eq(counts.newFailed, 0);
    do_check_eq(counts.succeeded, 2);

    Svc.Obs.remove("weave:engine:sync:applied", onApplied);
  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_previousFailed() {
  _("Ensure that failed records are retried.");
  Service.identity.username = "foo";
  Svc.Prefs.set("client.type", "mobile");

  const APPLY_BATCH_SIZE = 4;
  const NUMBER_OF_RECORDS = 14;

  // Engine that fails the first 2 records of each batch.
  let engine = makeRotaryEngine();
  engine.mobileGUIDFetchBatchSize = engine.applyIncomingBatchSize = APPLY_BATCH_SIZE;
  engine._store._applyIncomingBatch = engine._store.applyIncomingBatch;
  engine._store.applyIncomingBatch = function (records) {
    engine._store._applyIncomingBatch(records.slice(2));
    return [records[0].id, records[1].id];
  };

  // Create a batch of server side records.
  let collection = new ServerCollection();
  for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + i});
    collection.insert(id, payload);
  }

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {
    // Confirm initial environment.
    do_check_eq(engine.lastSync, 0);
    do_check_eq(engine.toFetch.length, 0);
    do_check_eq(engine.previousFailed.length, 0);
    do_check_empty(engine._store.items);

    // Seed previousFailed with items; the sync should replace them.
    let previousFailed = [Utils.makeGUID(), Utils.makeGUID(), Utils.makeGUID()];
    engine.previousFailed = previousFailed;
    do_check_eq(engine.previousFailed, previousFailed);

    // Do sync.
    engine._syncStartup();
    engine._processIncoming();

    // Expected result: 4 sync batches with 2 failures each => 8 failures
    do_check_attribute_count(engine._store.items, 6);
    do_check_eq(engine.previousFailed.length, 8);
    do_check_eq(engine.previousFailed[0], "record-no-0");
    do_check_eq(engine.previousFailed[1], "record-no-1");
    do_check_eq(engine.previousFailed[2], "record-no-4");
    do_check_eq(engine.previousFailed[3], "record-no-5");
    do_check_eq(engine.previousFailed[4], "record-no-8");
    do_check_eq(engine.previousFailed[5], "record-no-9");
    do_check_eq(engine.previousFailed[6], "record-no-12");
    do_check_eq(engine.previousFailed[7], "record-no-13");

    // Sync again; only records 0, 1, 8 and 9 fail a second time.
    engine._processIncoming();

    // A second sync with the same failed items should not add the same items again.
    // Items that did not fail a second time should no longer be in previousFailed.
    do_check_attribute_count(engine._store.items, 10);
    do_check_eq(engine.previousFailed.length, 4);
    do_check_eq(engine.previousFailed[0], "record-no-0");
    do_check_eq(engine.previousFailed[1], "record-no-1");
    do_check_eq(engine.previousFailed[2], "record-no-8");
    do_check_eq(engine.previousFailed[3], "record-no-9");

    // Refetched items that didn't fail the second time are in engine._store.items.
    do_check_eq(engine._store.items['record-no-4'], "Record No. 4");
    do_check_eq(engine._store.items['record-no-5'], "Record No. 5");
    do_check_eq(engine._store.items['record-no-12'], "Record No. 12");
    do_check_eq(engine._store.items['record-no-13'], "Record No. 13");
  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_failed_records() {
  _("Ensure that failed records from _reconcile and applyIncomingBatch are refetched.");
  Service.identity.username = "foo";

  // Let's create three and a bit batches worth of server side records.
  let collection = new ServerCollection();
  const NUMBER_OF_RECORDS = MOBILE_BATCH_SIZE * 3 + 5;
  for (let i = 0; i < NUMBER_OF_RECORDS; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + id});
    let wbo = new ServerWBO(id, payload);
    wbo.modified = Date.now()/1000 + 60 * (i - MOBILE_BATCH_SIZE * 3);
    collection.insertWBO(wbo);
  }

  // Engine that batches but likes to throw on a couple of records,
  // two in each batch: the even ones fail in reconcile, the odd ones
  // in applyIncoming.
  const BOGUS_RECORDS = ["record-no-" + 42,
                         "record-no-" + 23,
                         "record-no-" + (42 + MOBILE_BATCH_SIZE),
                         "record-no-" + (23 + MOBILE_BATCH_SIZE),
                         "record-no-" + (42 + MOBILE_BATCH_SIZE * 2),
                         "record-no-" + (23 + MOBILE_BATCH_SIZE * 2),
                         "record-no-" + (2 + MOBILE_BATCH_SIZE * 3),
                         "record-no-" + (1 + MOBILE_BATCH_SIZE * 3)];
  let engine = makeRotaryEngine();
  engine.applyIncomingBatchSize = MOBILE_BATCH_SIZE;

  engine.__reconcile = engine._reconcile;
  engine._reconcile = function _reconcile(record) {
    if (BOGUS_RECORDS.indexOf(record.id) % 2 == 0) {
      throw "I don't like this record! Baaaaaah!";
    }
    return this.__reconcile.apply(this, arguments);
  };
  engine._store._applyIncoming = engine._store.applyIncoming;
  engine._store.applyIncoming = function (record) {
    if (BOGUS_RECORDS.indexOf(record.id) % 2 == 1) {
      throw "I don't like this record! Baaaaaah!";
    }
    return this._applyIncoming.apply(this, arguments);
  };

  // Keep track of requests made of a collection.
  let count = 0;
  let uris = [];
  function recording_handler(collection) {
    let h = collection.handler();
    return function(req, res) {
      ++count;
      uris.push(req.path + "?" + req.queryString);
      return h(req, res);
    };
  }
  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": recording_handler(collection)
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    // Confirm initial environment
    do_check_eq(engine.lastSync, 0);
    do_check_eq(engine.toFetch.length, 0);
    do_check_eq(engine.previousFailed.length, 0);
    do_check_empty(engine._store.items);

    let observerSubject;
    let observerData;
    Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
      Svc.Obs.remove("weave:engine:sync:applied", onApplied);
      observerSubject = subject;
      observerData = data;
    });

    engine._syncStartup();
    engine._processIncoming();

    // Ensure that all records but the bogus ones have been applied.
    do_check_attribute_count(engine._store.items,
                             NUMBER_OF_RECORDS - BOGUS_RECORDS.length);

    // Ensure that the bogus records will be fetched again on the next sync.
    do_check_eq(engine.previousFailed.length, BOGUS_RECORDS.length);
    engine.previousFailed.sort();
    BOGUS_RECORDS.sort();
    for (let i = 0; i < engine.previousFailed.length; i++) {
      do_check_eq(engine.previousFailed[i], BOGUS_RECORDS[i]);
    }

    // Ensure the observer was notified
    do_check_eq(observerData, engine.name);
    do_check_eq(observerSubject.failed, BOGUS_RECORDS.length);
    do_check_eq(observerSubject.newFailed, BOGUS_RECORDS.length);

    // Testing batching of failed item fetches.
    // Try to sync again. Ensure that we split the request into chunks to avoid
    // URI length limitations.
    function batchDownload(batchSize) {
      count = 0;
      uris = [];
      engine.guidFetchBatchSize = batchSize;
      engine._processIncoming();
      _("Tried again. Requests: " + count + "; URIs: " + JSON.stringify(uris));
      return count;
    }

    // There are 8 bad records, so this needs 3 fetches.
    _("Test batching with ID batch size 3, normal mobile batch size.");
    do_check_eq(batchDownload(3), 3);

    // Now see with a more realistic limit.
    _("Test batching with sufficient ID batch size.");
    do_check_eq(batchDownload(BOGUS_RECORDS.length), 1);

    // If we're on mobile, that limit is used by default.
    _("Test batching with tiny mobile batch size.");
    Svc.Prefs.set("client.type", "mobile");
    engine.mobileGUIDFetchBatchSize = 2;
    do_check_eq(batchDownload(BOGUS_RECORDS.length), 4);

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_processIncoming_decrypt_failed() {
  _("Ensure that records failing to decrypt are either replaced or refetched.");

  Service.identity.username = "foo";

  // Some good and some bogus records. Two don't contain valid JSON,
  // two will throw during decrypt.
  let collection = new ServerCollection();
  collection._wbos.flying = new ServerWBO(
    'flying', encryptPayload({id: 'flying',
                              denomination: "LNER Class A3 4472"}));
  collection._wbos.nojson = new ServerWBO("nojson", "This is invalid JSON");
  collection._wbos.nojson2 = new ServerWBO("nojson2", "This is invalid JSON");
  collection._wbos.scotsman = new ServerWBO(
    'scotsman', encryptPayload({id: 'scotsman',
                                denomination: "Flying Scotsman"}));
  collection._wbos.nodecrypt = new ServerWBO("nodecrypt", "Decrypt this!");
  collection._wbos.nodecrypt2 = new ServerWBO("nodecrypt2", "Decrypt this!");

  // Patch the fake crypto service to throw on the records above.
  Svc.Crypto._decrypt = Svc.Crypto.decrypt;
  Svc.Crypto.decrypt = function (ciphertext) {
    if (ciphertext == "Decrypt this!") {
      throw "Derp! Cipher finalized failed. Im ur crypto destroyin ur recordz.";
    }
    return this._decrypt.apply(this, arguments);
  };

  // Some broken records also exist locally.
  let engine = makeRotaryEngine();
  engine.enabled = true;
  engine._store.items = {nojson: "Valid JSON",
                         nodecrypt: "Valid ciphertext"};

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
  try {

    // Confirm initial state
    do_check_eq(engine.toFetch.length, 0);
    do_check_eq(engine.previousFailed.length, 0);

    let observerSubject;
    let observerData;
    Svc.Obs.add("weave:engine:sync:applied", function onApplied(subject, data) {
      Svc.Obs.remove("weave:engine:sync:applied", onApplied);
      observerSubject = subject;
      observerData = data;
    });

    engine.lastSync = collection.wbo("nojson").modified - 1;
    engine.sync();

    do_check_eq(engine.previousFailed.length, 4);
    do_check_eq(engine.previousFailed[0], "nojson");
    do_check_eq(engine.previousFailed[1], "nojson2");
    do_check_eq(engine.previousFailed[2], "nodecrypt");
    do_check_eq(engine.previousFailed[3], "nodecrypt2");

    // Ensure the observer was notified
    do_check_eq(observerData, engine.name);
    do_check_eq(observerSubject.applied, 2);
    do_check_eq(observerSubject.failed, 4);

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_uploadOutgoing_toEmptyServer() {
  _("SyncEngine._uploadOutgoing uploads new records to server");

  Service.identity.username = "foo";
  let collection = new ServerCollection();
  collection._wbos.flying = new ServerWBO('flying');
  collection._wbos.scotsman = new ServerWBO('scotsman');

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler(),
    "/1.1/foo/storage/rotary/flying": collection.wbo("flying").handler(),
    "/1.1/foo/storage/rotary/scotsman": collection.wbo("scotsman").handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  generateNewKeys(Service.collectionKeys);

  let engine = makeRotaryEngine();
  engine.lastSync = 123; // needs to be non-zero so that tracker is queried
  engine._store.items = {flying: "LNER Class A3 4472",
                         scotsman: "Flying Scotsman"};
  // Mark one of these records as changed
  engine._tracker.addChangedID('scotsman', 0);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    // Confirm initial environment
    do_check_eq(engine.lastSyncLocal, 0);
    do_check_eq(collection.payload("flying"), undefined);
    do_check_eq(collection.payload("scotsman"), undefined);

    engine._syncStartup();
    engine._uploadOutgoing();

    // Local timestamp has been set.
    do_check_true(engine.lastSyncLocal > 0);

    // Ensure the marked record ('scotsman') has been uploaded and is
    // no longer marked.
    do_check_eq(collection.payload("flying"), undefined);
    do_check_true(!!collection.payload("scotsman"));
    do_check_eq(JSON.parse(collection.wbo("scotsman").data.ciphertext).id,
                "scotsman");
    do_check_eq(engine._tracker.changedIDs["scotsman"], undefined);

    // The 'flying' record wasn't marked so it wasn't uploaded
    do_check_eq(collection.payload("flying"), undefined);

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_uploadOutgoing_failed() {
  _("SyncEngine._uploadOutgoing doesn't clear the tracker of objects that failed to upload.");

  Service.identity.username = "foo";
  let collection = new ServerCollection();
  // We only define the "flying" WBO on the server, not the "scotsman"
  // and "peppercorn" ones.
  collection._wbos.flying = new ServerWBO('flying');

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let engine = makeRotaryEngine();
  engine.lastSync = 123; // needs to be non-zero so that tracker is queried
  engine._store.items = {flying: "LNER Class A3 4472",
                         scotsman: "Flying Scotsman",
                         peppercorn: "Peppercorn Class"};
  // Mark these records as changed
  const FLYING_CHANGED = 12345;
  const SCOTSMAN_CHANGED = 23456;
  const PEPPERCORN_CHANGED = 34567;
  engine._tracker.addChangedID('flying', FLYING_CHANGED);
  engine._tracker.addChangedID('scotsman', SCOTSMAN_CHANGED);
  engine._tracker.addChangedID('peppercorn', PEPPERCORN_CHANGED);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    // Confirm initial environment
    do_check_eq(engine.lastSyncLocal, 0);
    do_check_eq(collection.payload("flying"), undefined);
    do_check_eq(engine._tracker.changedIDs['flying'], FLYING_CHANGED);
    do_check_eq(engine._tracker.changedIDs['scotsman'], SCOTSMAN_CHANGED);
    do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED);

    engine.enabled = true;
    engine.sync();

    // Local timestamp has been set.
    do_check_true(engine.lastSyncLocal > 0);

    // Ensure the 'flying' record has been uploaded and is no longer marked.
    do_check_true(!!collection.payload("flying"));
    do_check_eq(engine._tracker.changedIDs['flying'], undefined);

    // The 'scotsman' and 'peppercorn' records couldn't be uploaded so
    // they weren't cleared from the tracker.
    do_check_eq(engine._tracker.changedIDs['scotsman'], SCOTSMAN_CHANGED);
    do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED);

  } finally {
    cleanAndGo(server);
  }
});

|
add_test(function test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
  _("SyncEngine._uploadOutgoing uploads in batches of MAX_UPLOAD_RECORDS");

  Service.identity.username = "foo";
  let collection = new ServerCollection();

  // Let's count how many times the client posts to the server
  var noOfUploads = 0;
  collection.post = (function(orig) {
    return function() {
      noOfUploads++;
      return orig.apply(this, arguments);
    };
  }(collection.post));

  // Create a bunch of records (and server side handlers)
  let engine = makeRotaryEngine();
  for (var i = 0; i < 234; i++) {
    let id = 'record-no-' + i;
    engine._store.items[id] = "Record No. " + i;
    engine._tracker.addChangedID(id, 0);
    collection.insert(id);
  }

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  try {

    // Confirm initial environment.
    do_check_eq(noOfUploads, 0);

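    // _uploadOutgoing() is expected to chunk outgoing records into POSTs of
    // at most MAX_UPLOAD_RECORDS (100 in constants.js at the time of writing),
    // so 234 records should arrive in three batches.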
    engine._syncStartup();
    engine._uploadOutgoing();

    // Ensure all records have been uploaded.
    for (i = 0; i < 234; i++) {
      do_check_true(!!collection.payload('record-no-' + i));
    }

    // Ensure that the uploads were performed in batches of MAX_UPLOAD_RECORDS.
    do_check_eq(noOfUploads, Math.ceil(234 / MAX_UPLOAD_RECORDS));

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_syncFinish_noDelete() {
  _("SyncEngine._syncFinish resets tracker's score");

  let server = httpd_setup({});

  let syncTesting = new SyncTestingInfrastructure(server);
  let engine = makeRotaryEngine();
  engine._delete = {}; // Nothing to delete
  engine._tracker.score = 100;

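  // engine.score is expected to mirror the tracker's score, so the reset
  // below should take it from 100 back down to 0.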
  // _syncFinish() will reset the engine's score.
  engine._syncFinish();
  do_check_eq(engine.score, 0);
  server.stop(run_next_test);
});


add_test(function test_syncFinish_deleteByIds() {
  _("SyncEngine._syncFinish deletes server records slated for deletion (list of record IDs).");

  Service.identity.username = "foo";
  let collection = new ServerCollection();
  collection._wbos.flying = new ServerWBO(
      'flying', encryptPayload({id: 'flying',
                                denomination: "LNER Class A3 4472"}));
  collection._wbos.scotsman = new ServerWBO(
      'scotsman', encryptPayload({id: 'scotsman',
                                  denomination: "Flying Scotsman"}));
  collection._wbos.rekolok = new ServerWBO(
      'rekolok', encryptPayload({id: 'rekolok',
                                 denomination: "Rekonstruktionslokomotive"}));

  let server = httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });
  let syncTesting = new SyncTestingInfrastructure(server);

  let engine = makeRotaryEngine();
  try {
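    // _delete.ids is the engine's queue of server-side deletions; _syncFinish()
    // should issue a DELETE for exactly these ids and leave 'scotsman' alone.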
    engine._delete = {ids: ['flying', 'rekolok']};
    engine._syncFinish();

    // The 'flying' and 'rekolok' records were deleted while the
    // 'scotsman' one wasn't.
    do_check_eq(collection.payload("flying"), undefined);
    do_check_true(!!collection.payload("scotsman"));
    do_check_eq(collection.payload("rekolok"), undefined);

    // The deletion todo list has been reset.
    do_check_eq(engine._delete.ids, undefined);

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_syncFinish_deleteLotsInBatches() {
  _("SyncEngine._syncFinish deletes server records in batches of 100 (list of record IDs).");

  Service.identity.username = "foo";
  let collection = new ServerCollection();

  // Let's count how many times the client does a DELETE request to the server
  var noOfUploads = 0;
  collection.delete = (function(orig) {
    return function() {
      noOfUploads++;
      return orig.apply(this, arguments);
    };
  }(collection.delete));

  // Create a bunch of records on the server
  let now = Date.now();
  for (var i = 0; i < 234; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + i});
    let wbo = new ServerWBO(id, payload);
    wbo.modified = now / 1000 - 60 * (i + 110);
    collection.insertWBO(wbo);
  }

  let server = httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let engine = makeRotaryEngine();
  try {

    // Confirm initial environment
    do_check_eq(noOfUploads, 0);

    // Declare what we want to have deleted: all records no. 100 and
    // up, plus all records modified within the last 200.5 minutes
    // (which are records 0 through 90).
    engine._delete = {ids: [],
                      newer: now / 1000 - 60 * 200.5};
    for (i = 100; i < 234; i++) {
      engine._delete.ids.push('record-no-' + i);
    }

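    // Expectation: the 134 explicit ids should go out in two DELETE batches of
    // at most 100 ids each, and the 'newer' criterion should add one more
    // DELETE, for a total of 2 + 1 requests.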
    engine._syncFinish();

    // Ensure that the appropriate server data has been wiped while
    // preserving records 91 through 99.
    for (i = 0; i < 234; i++) {
      let id = 'record-no-' + i;
      if (i <= 90 || i >= 100) {
        do_check_eq(collection.payload(id), undefined);
      } else {
        do_check_true(!!collection.payload(id));
      }
    }

    // The deletion was done in batches
    do_check_eq(noOfUploads, 2 + 1);

    // The deletion todo list has been reset.
    do_check_eq(engine._delete.ids, undefined);

  } finally {
    cleanAndGo(server);
  }
});


add_test(function test_sync_partialUpload() {
  _("SyncEngine.sync() keeps changedIDs that couldn't be uploaded.");

  Service.identity.username = "foo";

  let collection = new ServerCollection();
  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });
  let syncTesting = new SyncTestingInfrastructure(server);
  generateNewKeys(Service.collectionKeys);

  let engine = makeRotaryEngine();
  engine.lastSync = 123; // needs to be non-zero so that tracker is queried
  engine.lastSyncLocal = 456;

  // Let the third upload fail completely
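  // (With upload batches of MAX_UPLOAD_RECORDS, presumably 100, the third
  // POST would carry records 200 through 233.)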
  var noOfUploads = 0;
  collection.post = (function(orig) {
    return function() {
      if (noOfUploads == 2)
        throw "FAIL!";
      noOfUploads++;
      return orig.apply(this, arguments);
    };
  }(collection.post));

  // Create a bunch of records (and server side handlers)
  for (let i = 0; i < 234; i++) {
    let id = 'record-no-' + i;
    engine._store.items[id] = "Record No. " + i;
    engine._tracker.addChangedID(id, i);
    // Let two items in the first upload batch fail.
    if ((i != 23) && (i != 42)) {
      collection.insert(id);
    }
  }

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  try {

    engine.enabled = true;
    let error;
    try {
      engine.sync();
    } catch (ex) {
      error = ex;
    }
    do_check_true(!!error);

    // The timestamp has been updated.
    do_check_true(engine.lastSyncLocal > 456);

    for (let i = 0; i < 234; i++) {
      let id = 'record-no-' + i;
      // Ensure failed records are back in the tracker:
      // * records no. 23 and 42 were rejected by the server,
      // * records no. 200 and higher couldn't be uploaded because we failed
      //   hard on the 3rd upload.
      if ((i == 23) || (i == 42) || (i >= 200))
        do_check_eq(engine._tracker.changedIDs[id], i);
      else
        do_check_false(id in engine._tracker.changedIDs);
    }

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_canDecrypt_noCryptoKeys() {
  _("SyncEngine.canDecrypt returns false if the engine fails to decrypt items on the server, e.g. due to a missing crypto key collection.");
  Service.identity.username = "foo";

  // Wipe collection keys so we can test the desired scenario.
  Service.collectionKeys.clear();

  let collection = new ServerCollection();
  collection._wbos.flying = new ServerWBO(
      'flying', encryptPayload({id: 'flying',
                                denomination: "LNER Class A3 4472"}));

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  let engine = makeRotaryEngine();
  try {

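    // canDecrypt() is expected to fetch a record from the collection and try
    // to decrypt it; with the collection keys wiped, that should fail.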
    do_check_false(engine.canDecrypt());

  } finally {
    cleanAndGo(server);
  }
});

add_test(function test_canDecrypt_true() {
  _("SyncEngine.canDecrypt returns true if the engine can decrypt the items on the server.");
  Service.identity.username = "foo";

  generateNewKeys(Service.collectionKeys);

  let collection = new ServerCollection();
  collection._wbos.flying = new ServerWBO(
      'flying', encryptPayload({id: 'flying',
                                denomination: "LNER Class A3 4472"}));

  let server = sync_httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);
  let engine = makeRotaryEngine();
  try {

    do_check_true(engine.canDecrypt());

  } finally {
    cleanAndGo(server);
  }

});

add_test(function test_syncapplied_observer() {
  Service.identity.username = "foo";

  const NUMBER_OF_RECORDS = 10;

  let engine = makeRotaryEngine();

  // Create a batch of server side records.
  let collection = new ServerCollection();
  for (var i = 0; i < NUMBER_OF_RECORDS; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + id});
    collection.insert(id, payload);
  }

  let server = httpd_setup({
    "/1.1/foo/storage/rotary": collection.handler()
  });

  let syncTesting = new SyncTestingInfrastructure(server);

  let meta_global = Service.recordManager.set(engine.metaURL,
                                              new WBORecord(engine.metaURL));
  meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};

  let numApplyCalls = 0;
  let engine_name;
  let count;
  function onApplied(subject, data) {
    numApplyCalls++;
    engine_name = data;
    count = subject;
  }

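  // The "weave:engine:sync:applied" notification should fire once per sync,
  // with the applied-counts object as the subject and the engine name as the
  // data argument; the scheduler uses it to flag hasIncomingItems.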
  Svc.Obs.add("weave:engine:sync:applied", onApplied);

  try {
    Service.scheduler.hasIncomingItems = false;

    // Do sync.
    engine._syncStartup();
    engine._processIncoming();

    do_check_attribute_count(engine._store.items, 10);

    do_check_eq(numApplyCalls, 1);
    do_check_eq(engine_name, "rotary");
    do_check_eq(count.applied, 10);

    do_check_true(Service.scheduler.hasIncomingItems);
  } finally {
    cleanAndGo(server);
    Service.scheduler.hasIncomingItems = false;
    Svc.Obs.remove("weave:engine:sync:applied", onApplied);
  }
});