/* -*- js-indent-level: 4 -*- */
/*
 * e10s event dispatcher from content->chrome
 *
 * type = eventName (e.g. "QuitApplication")
 * data = a JSON-serializable object, e.g. {"filename": filename} for LoggerInit
 */
function getElement(id) {
    return ((typeof(id) == "string") ?
            document.getElementById(id) : id);
}

this.$ = this.getElement;
function contentDispatchEvent(type, data, sync) {
    if (typeof(data) == "undefined") {
        data = {};
    }

    var event = new CustomEvent("contentEvent", {
        bubbles: true,
        detail: {
            "sync": sync,
            "type": type,
            "data": JSON.stringify(data)
        }
    });
    document.dispatchEvent(event);
}

function contentAsyncEvent(type, data) {
    contentDispatchEvent(type, data, 0);
}
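// Illustrative usage (not executed here; the chrome side is assumed to listen
// for "contentEvent" on the document and unpack event.detail):
//
//   contentAsyncEvent("LoggerInit", {"filename": filename});
//   contentAsyncEvent("QuitApplication");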
/* Helper Function */
function extend(obj, /* optional */ skip) {
    // Extend an array with an array-like object starting
    // from the skip index.
    if (!skip) {
        skip = 0;
    }
    // Initialize here so a missing obj yields an empty array rather than
    // undefined.
    var ret = [];
    if (obj) {
        var l = obj.length;
        for (var i = skip; i < l; i++) {
            ret.push(obj[i]);
        }
    }
    return ret;
}
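// Illustrative example: copy an array-like object, skipping its first entry.
//
//   function tail() { return extend(arguments, 1); }
//   tail("skipped", "a", "b")  =>  ["a", "b"]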
function flattenArguments(lst/* ...*/) {
    var res = [];
    var args = extend(arguments);
    while (args.length) {
        var o = args.shift();
        if (o && typeof(o) == "object" && typeof(o.length) == "number") {
            // Array-like: push its elements back onto the front of the work
            // queue, preserving their order.
            for (var i = o.length - 1; i >= 0; i--) {
                args.unshift(o[i]);
            }
        } else {
            res.push(o);
        }
    }
    return res;
}
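// Illustrative example: nested array-likes are flattened in order.
//
//   flattenArguments(1, [2, [3, 4]], 5)  =>  [1, 2, 3, 4, 5]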
/**
 * TestRunner: A test runner for SimpleTest
 * TODO:
 *
 *  * Avoid moving iframes: that causes reloads on Mozilla and Opera.
 *
 **/
var TestRunner = {};
TestRunner.logEnabled = false;
TestRunner._currentTest = 0;
TestRunner._lastTestFinished = -1;
TestRunner._loopIsRestarting = false;
TestRunner.currentTestURL = "";
TestRunner.originalTestURL = "";
TestRunner._urls = [];
TestRunner._lastAssertionCount = 0;
TestRunner._expectedMinAsserts = 0;
TestRunner._expectedMaxAsserts = 0;

TestRunner.timeout = 5 * 60 * 1000; // 5 minutes.
TestRunner.maxTimeouts = 4; // halt testing after too many timeouts
TestRunner.runSlower = false;
TestRunner.dumpOutputDirectory = "";
TestRunner.dumpAboutMemoryAfterTest = false;
TestRunner.dumpDMDAfterTest = false;
TestRunner.quiet = false;
TestRunner.slowestTestTime = 0;
TestRunner.slowestTestURL = "";

TestRunner._expectingProcessCrash = false;

/**
 * Make sure the tests don't hang indefinitely.
 **/
TestRunner._numTimeouts = 0;
TestRunner._currentTestStartTime = new Date().valueOf();
TestRunner._timeoutFactor = 1;
TestRunner._checkForHangs = function() {
    function reportError(win, msg) {
        if ("SimpleTest" in win) {
            win.SimpleTest.ok(false, msg);
        } else if ("W3CTest" in win) {
            win.W3CTest.logFailure(msg);
        }
    }

    function killTest(win) {
        if ("SimpleTest" in win) {
            win.SimpleTest.finish();
        } else if ("W3CTest" in win) {
            win.W3CTest.timeout();
        }
    }

    if (TestRunner._currentTest < TestRunner._urls.length) {
        var runtime = new Date().valueOf() - TestRunner._currentTestStartTime;
        if (runtime >= TestRunner.timeout * TestRunner._timeoutFactor) {
            var frameWindow = $('testframe').contentWindow.wrappedJSObject ||
                              $('testframe').contentWindow;
            reportError(frameWindow, "Test timed out.");

            // If we have too many timeouts, give up. We don't want to wait hours
            // for results if some bug causes lots of tests to time out.
            if (++TestRunner._numTimeouts >= TestRunner.maxTimeouts) {
                TestRunner._haltTests = true;

                TestRunner.currentTestURL = "(SimpleTest/TestRunner.js)";
                reportError(frameWindow, TestRunner.maxTimeouts + " test timeouts, giving up.");
                var skippedTests = TestRunner._urls.length - TestRunner._currentTest;
                reportError(frameWindow, "Skipping " + skippedTests + " remaining tests.");
            }

            // Add a little (1 second) delay to ensure automation.py has time to notice
            // "Test timed out" log and process it (= take a screenshot).
            setTimeout(function delayedKillTest() { killTest(frameWindow); }, 1000);

            if (TestRunner._haltTests)
                return;
        }

        setTimeout(TestRunner._checkForHangs, 30000);
    }
};
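// Illustrative timing for _checkForHangs, using the defaults defined above:
// a test is declared hung once it has run for timeout * _timeoutFactor ms
// (5 minutes by default), the check re-arms every 30 seconds, and after
// maxTimeouts (4) hung tests the whole run halts and the rest are skipped.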
TestRunner.requestLongerTimeout = function(factor) {
    TestRunner._timeoutFactor = factor;
};
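// Illustrative usage (tests are assumed to reach this through a SimpleTest
// wrapper rather than calling it directly): a known-slow test can multiply
// the per-test timeout before starting its long-running work.
//
//   TestRunner.requestLongerTimeout(2); // allow 2 * TestRunner.timeout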
/**
 * This is used to loop tests
 **/
TestRunner.repeat = 0;
TestRunner._currentLoop = 1;
TestRunner.expectAssertions = function(min, max) {
    if (typeof(max) == "undefined") {
        max = min;
    }
    if (typeof(min) != "number" || typeof(max) != "number" ||
        min < 0 || max < min) {
        throw new Error("bad parameter to expectAssertions");
    }
    TestRunner._expectedMinAsserts = min;
    TestRunner._expectedMaxAsserts = max;
};
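// Illustrative usage: declare how many debug-build assertions a test is
// expected to trigger (checked in testUnloaded below).
//
//   TestRunner.expectAssertions(2);     // exactly 2
//   TestRunner.expectAssertions(1, 3);  // anywhere from 1 to 3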
/**
 * This function is called after generating the summary.
 **/
TestRunner.onComplete = null;
/**
 * Adds a failed test case to a list so we can rerun only the failed tests
 **/
TestRunner._failedTests = {};
TestRunner._failureFile = "";

TestRunner.addFailedTest = function(testName) {
    if (TestRunner._failedTests[testName] == undefined) {
        TestRunner._failedTests[testName] = "";
    }
};

TestRunner.setFailureFile = function(fileName) {
    TestRunner._failureFile = fileName;
};

TestRunner.generateFailureList = function () {
    if (TestRunner._failureFile) {
        var failures = new SpecialPowersLogger(TestRunner._failureFile);
        failures.log(JSON.stringify(TestRunner._failedTests));
        failures.close();
    }
};
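// Illustrative flow (file name and test path are hypothetical): failed tests
// accumulate as keys of _failedTests, and generateFailureList() writes them
// out as JSON so a later run can be restricted to just those tests.
//
//   TestRunner.setFailureFile("failures.json");
//   TestRunner.addFailedTest("dom/tests/test_foo.html");
//   // generateFailureList() then logs {"dom/tests/test_foo.html": ""}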
/**
 * If logEnabled is true, this is the logger that will be used.
 **/
TestRunner.logger = LogController;

TestRunner.log = function(msg) {
    if (TestRunner.logEnabled) {
        TestRunner.logger.log(msg);
    } else {
        dump(msg + "\n");
    }
};
TestRunner.error = function(msg) {
    if (TestRunner.logEnabled) {
        TestRunner.logger.error(msg);
    } else {
        dump(msg + "\n");
    }

    if (TestRunner.runUntilFailure) {
        TestRunner._haltTests = true;
    }

    if (TestRunner.debugOnFailure) {
        // You've hit this line because you requested to break into the
        // debugger upon a testcase failure on your test run.
        debugger;
    }
};
/**
 * Toggle element visibility
 **/
TestRunner._toggle = function(el) {
    if (el.className == "noshow") {
        el.className = "";
        el.style.cssText = "";
    } else {
        el.className = "noshow";
        el.style.cssText = "width:0px; height:0px; border:0px;";
    }
};
/**
 * Creates the iframe that contains a test
 **/
TestRunner._makeIframe = function (url, retry) {
    var iframe = $('testframe');
    if (url != "about:blank" &&
        (("hasFocus" in document && !document.hasFocus()) ||
         ("activeElement" in document && document.activeElement != iframe))) {

        contentAsyncEvent("Focus");
        window.focus();
        SpecialPowers.focus();
        iframe.focus();
        if (retry < 3) {
            // Use a closure rather than a string argument to setTimeout, so
            // URLs containing quotes can't break the retry call.
            window.setTimeout(function() {
                TestRunner._makeIframe(url, retry + 1);
            }, 1000);
            return;
        }

        TestRunner.log("Error: Unable to restore focus, expect failures and timeouts.");
    }
    window.scrollTo(0, $('indicator').offsetTop);
    iframe.src = url;
    iframe.name = url;
    iframe.width = "500";
    return iframe;
};
/**
 * Returns the current test URL.
 * We use this to tell whether the test has navigated to another test without
 * being finished first.
 */
TestRunner.getLoadedTestURL = function () {
    var prefix = "";
    // handle mochitest-chrome URIs
    if ($('testframe').contentWindow.location.protocol == "chrome:") {
        prefix = "chrome://mochitests";
    }
    return prefix + $('testframe').contentWindow.location.pathname;
};
/**
 * TestRunner entry point.
 *
 * The arguments are the URLs of the tests to be run.
 **/
TestRunner.runTests = function (/*url...*/) {
    TestRunner.log("SimpleTest START");
    TestRunner.originalTestURL = $("current-test").innerHTML;

    SpecialPowers.registerProcessCrashObservers();

    TestRunner._urls = flattenArguments(arguments);
    $('testframe').src = "";
    TestRunner._checkForHangs();
    TestRunner.runNextTest();
};
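// Illustrative call (test paths are hypothetical): URLs may be passed
// individually or in arrays; flattenArguments() accepts either form.
//
//   TestRunner.runTests("test_a.html", ["test_b.html", "test_c.html"]);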
/**
 * Used for running a set of tests in a loop for debugging purposes.
 * Takes an array of URLs.
 **/
TestRunner.resetTests = function(listURLs) {
    TestRunner._currentTest = 0;
    // Reset our "current-test" line; other functionality depends on it.
    $("current-test").innerHTML = TestRunner.originalTestURL;
    if (TestRunner.logEnabled)
        TestRunner.log("SimpleTest START Loop " + TestRunner._currentLoop);

    TestRunner._urls = listURLs;
    $('testframe').src = "";
    TestRunner._checkForHangs();
    TestRunner.runNextTest();
};
/**
 * Run the next test. If no test remains, calls onComplete().
 **/
TestRunner._haltTests = false;
TestRunner.runNextTest = function() {
    if (TestRunner._currentTest < TestRunner._urls.length &&
        !TestRunner._haltTests)
    {
        var url = TestRunner._urls[TestRunner._currentTest];
        TestRunner.currentTestURL = url;

        $("current-test-path").innerHTML = url;

        TestRunner._currentTestStartTime = new Date().valueOf();
        TestRunner._timeoutFactor = 1;
        TestRunner._expectedMinAsserts = 0;
        TestRunner._expectedMaxAsserts = 0;

        TestRunner.log("TEST-START | " + url); // used by automation.py

        TestRunner._makeIframe(url, 0);
    } else {
        $("current-test").innerHTML = "<b>Finished</b>";
        TestRunner._makeIframe("about:blank", 0);

        if (parseInt($("pass-count").innerHTML) == 0 &&
            parseInt($("fail-count").innerHTML) == 0 &&
            parseInt($("todo-count").innerHTML) == 0)
        {
            // No |$('testframe').contentWindow|, so manually update: ...
            // ... the log,
            TestRunner.error("TEST-UNEXPECTED-FAIL | (SimpleTest/TestRunner.js) | No checks actually run.");
            // ... the count,
            $("fail-count").innerHTML = 1;
            // ... the indicator.
            var indicator = $("indicator");
            indicator.innerHTML = "Status: Fail (No checks actually run)";
            indicator.style.backgroundColor = "red";
        }

        SpecialPowers.unregisterProcessCrashObservers();

        TestRunner.log("TEST-START | Shutdown"); // used by automation.py
        TestRunner.log("Passed: " + $("pass-count").innerHTML);
        TestRunner.log("Failed: " + $("fail-count").innerHTML);
        TestRunner.log("Todo: " + $("todo-count").innerHTML);
        TestRunner.log("Slowest: " + TestRunner.slowestTestTime + "ms - " + TestRunner.slowestTestURL);
        // If we are looping, don't log this, because it closes the log file.
        if (TestRunner.repeat == 0) {
            TestRunner.log("SimpleTest FINISHED");
        }

        if (TestRunner.repeat == 0 && TestRunner.onComplete) {
            TestRunner.onComplete();
        }

        if (TestRunner._currentLoop <= TestRunner.repeat && !TestRunner._haltTests) {
            TestRunner._currentLoop++;
            TestRunner.resetTests(TestRunner._urls);
            TestRunner._loopIsRestarting = true;
        } else {
            // Loops are finished.
            if (TestRunner.logEnabled) {
                TestRunner.log("TEST-INFO | Ran " + TestRunner._currentLoop + " Loops");
                TestRunner.log("SimpleTest FINISHED");
            }

            if (TestRunner.onComplete)
                TestRunner.onComplete();
        }
        TestRunner.generateFailureList();
    }
};
TestRunner.expectChildProcessCrash = function() {
    TestRunner._expectingProcessCrash = true;
};
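// Illustrative usage: a test that deliberately crashes a child process calls
// this first, so the crash dumps it leaves behind are cleaned up by
// testFinished() instead of being reported as unexpected failures.
//
//   TestRunner.expectChildProcessCrash();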
/**
 * This stub is called by SimpleTest when a test is finished.
 **/
TestRunner.testFinished = function(tests) {
    // Prevent a test from calling finish() multiple times before we
    // have a chance to unload it.
    if (TestRunner._currentTest == TestRunner._lastTestFinished &&
        !TestRunner._loopIsRestarting) {
        TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                         TestRunner.currentTestURL +
                         " | called finish() multiple times");
        TestRunner.updateUI([{ result: false }]);
        return;
    }
    TestRunner._lastTestFinished = TestRunner._currentTest;
    TestRunner._loopIsRestarting = false;

    MemoryStats.dump(TestRunner.log, TestRunner._currentTest,
                     TestRunner.currentTestURL,
                     TestRunner.dumpOutputDirectory,
                     TestRunner.dumpAboutMemoryAfterTest,
                     TestRunner.dumpDMDAfterTest);

    function cleanUpCrashDumpFiles() {
        if (!SpecialPowers.removeExpectedCrashDumpFiles(TestRunner._expectingProcessCrash)) {
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | This test did not leave any crash dumps behind, but we were expecting some!");
            tests.push({ result: false });
        }
        var unexpectedCrashDumpFiles =
            SpecialPowers.findUnexpectedCrashDumpFiles();
        TestRunner._expectingProcessCrash = false;
        if (unexpectedCrashDumpFiles.length) {
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | This test left crash dumps behind, but we " +
                             "weren't expecting it to!");
            tests.push({ result: false });
            unexpectedCrashDumpFiles.sort().forEach(function(aFilename) {
                TestRunner.log("TEST-INFO | Found unexpected crash dump file " +
                               aFilename + ".");
            });
        }
    }

    function runNextTest() {
        if (TestRunner.currentTestURL != TestRunner.getLoadedTestURL()) {
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | " + TestRunner.getLoadedTestURL() +
                             " finished in a non-clean fashion, probably" +
                             " because it didn't call SimpleTest.finish()");
            tests.push({ result: false });
        }

        var runtime = new Date().valueOf() - TestRunner._currentTestStartTime;
        TestRunner.log("TEST-END | " +
                       TestRunner.currentTestURL +
                       " | finished in " + runtime + "ms");
        if (TestRunner.slowestTestTime < runtime && TestRunner._timeoutFactor == 1) {
            TestRunner.slowestTestTime = runtime;
            TestRunner.slowestTestURL = TestRunner.currentTestURL;
        }

        TestRunner.updateUI(tests);

        var interstitialURL;
        if ($('testframe').contentWindow.location.protocol == "chrome:") {
            interstitialURL = "tests/SimpleTest/iframe-between-tests.html";
        } else {
            interstitialURL = "/tests/SimpleTest/iframe-between-tests.html";
        }
        TestRunner._makeIframe(interstitialURL, 0);
    }

    SpecialPowers.executeAfterFlushingMessageQueue(function() {
        cleanUpCrashDumpFiles();
        SpecialPowers.flushAllAppsLaunchable();
        SpecialPowers.flushPermissions(function () {
            SpecialPowers.flushPrefEnv(runNextTest);
        });
    });
};
TestRunner.testUnloaded = function() {
    // If we're in a debug build, check assertion counts. This code is
    // similar to the code in Tester_nextTest in browser-test.js used
    // for browser-chrome mochitests.
    if (SpecialPowers.isDebugBuild) {
        var newAssertionCount = SpecialPowers.assertionCount();
        var numAsserts = newAssertionCount - TestRunner._lastAssertionCount;
        TestRunner._lastAssertionCount = newAssertionCount;

        var url = TestRunner._urls[TestRunner._currentTest];
        var max = TestRunner._expectedMaxAsserts;
        var min = TestRunner._expectedMinAsserts;
        if (numAsserts > max) {
            TestRunner.error("TEST-UNEXPECTED-FAIL | " + url +
                             " | Assertion count " + numAsserts +
                             " is greater than expected range " +
                             min + "-" + max + " assertions.");
            TestRunner.updateUI([{ result: false }]);
        } else if (numAsserts < min) {
            TestRunner.error("TEST-UNEXPECTED-PASS | " + url +
                             " | Assertion count " + numAsserts +
                             " is less than expected range " +
                             min + "-" + max + " assertions.");
            TestRunner.updateUI([{ result: false }]);
        } else if (numAsserts > 0) {
            TestRunner.log("TEST-KNOWN-FAIL | " + url +
                           " | Assertion count " + numAsserts +
                           " within expected range " +
                           min + "-" + max + " assertions.");
        }
    }
    TestRunner._currentTest++;
    if (TestRunner.runSlower) {
        setTimeout(TestRunner.runNextTest, 1000);
    } else {
        TestRunner.runNextTest();
    }
};
/**
 * Tally the results: passes, failures, and todos.
 */
TestRunner.countResults = function(tests) {
    var nOK = 0;
    var nNotOK = 0;
    var nTodo = 0;
    for (var i = 0; i < tests.length; ++i) {
        var test = tests[i];
        if (test.todo && !test.result) {
            nTodo++;
        } else if (test.result && !test.todo) {
            nOK++;
        } else {
            nNotOK++;
        }
    }
    return {"OK": nOK, "notOK": nNotOK, "todo": nTodo};
};
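// Illustrative example:
//
//   TestRunner.countResults([{result: true},
//                            {result: false},
//                            {todo: true, result: false}])
//   =>  {"OK": 1, "notOK": 1, "todo": 1}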
/**
 * Print out a table of any error messages found during a looped run.
 */
TestRunner.displayLoopErrors = function(tableName, tests) {
    if (TestRunner.countResults(tests).notOK > 0) {
        var table = $(tableName);
        var curtest;
        if (table.rows.length == 0) {
            // If the table headers are not yet generated, make them.
            var row = table.insertRow(table.rows.length);
            var cell = row.insertCell(0);
            var textNode = document.createTextNode("Test File Name:");
            cell.appendChild(textNode);
            cell = row.insertCell(1);
            textNode = document.createTextNode("Test:");
            cell.appendChild(textNode);
            cell = row.insertCell(2);
            textNode = document.createTextNode("Error message:");
            cell.appendChild(textNode);
        }

        // Find the broken tests.
        for (var testnum in tests) {
            curtest = tests[testnum];
            if (!((curtest.todo && !curtest.result) ||
                  (curtest.result && !curtest.todo))) {
                // This is a failed test, or an unexpected result from a todo
                // test. Display the related message.
                row = table.insertRow(table.rows.length);
                cell = row.insertCell(0);
                textNode = document.createTextNode(TestRunner.currentTestURL);
                cell.appendChild(textNode);
                cell = row.insertCell(1);
                textNode = document.createTextNode(curtest.name);
                cell.appendChild(textNode);
                cell = row.insertCell(2);
                textNode = document.createTextNode(curtest.diag ? curtest.diag : "");
                cell.appendChild(textNode);
            }
        }
    }
};
TestRunner.updateUI = function(tests) {
    var results = TestRunner.countResults(tests);
    var passCount = parseInt($("pass-count").innerHTML) + results.OK;
    var failCount = parseInt($("fail-count").innerHTML) + results.notOK;
    var todoCount = parseInt($("todo-count").innerHTML) + results.todo;
    $("pass-count").innerHTML = passCount;
    $("fail-count").innerHTML = failCount;
    $("todo-count").innerHTML = todoCount;

    // Set the top Green/Red bar.
    var indicator = $("indicator");
    if (failCount > 0) {
        indicator.innerHTML = "Status: Fail";
        indicator.style.backgroundColor = "red";
    } else if (passCount > 0) {
        indicator.innerHTML = "Status: Pass";
        indicator.style.backgroundColor = "#0d0";
    } else {
        indicator.innerHTML = "Status: ToDo";
        indicator.style.backgroundColor = "orange";
    }

    // Set the table values.
    var trID = "tr-" + $('current-test-path').innerHTML;
    var row = $(trID);

    // Only update the row if it actually exists (autoUI).
    if (row != null) {
        var tds = row.getElementsByTagName("td");
        tds[0].style.backgroundColor = "#0d0";
        tds[0].innerHTML = parseInt(tds[0].innerHTML) + parseInt(results.OK);
        tds[1].style.backgroundColor = results.notOK > 0 ? "red" : "#0d0";
        tds[1].innerHTML = parseInt(tds[1].innerHTML) + parseInt(results.notOK);
        tds[2].style.backgroundColor = results.todo > 0 ? "orange" : "#0d0";
        tds[2].innerHTML = parseInt(tds[2].innerHTML) + parseInt(results.todo);
    }

    // If we ran in a loop, display any errors found.
    if (TestRunner.repeat > 0) {
        TestRunner.displayLoopErrors('fail-table', tests);
    }
};