/* -*- js-indent-level: 4 -*- */
/*
 * e10s event dispatcher from content->chrome
 *
 * type = eventName (QuitApplication)
 * data = json object {"filename":filename} <- for LoggerInit
 */

/**
 * Look up a DOM element by id; a non-string argument (an element that was
 * already resolved) is passed through unchanged.
 */
function getElement(id) {
    return (typeof(id) == "string") ? document.getElementById(id) : id;
}

// Expose the usual "$" shorthand. Bind through globalThis when available so
// this also works where top-level `this` is not the window (e.g. a module or
// subscript-loaded scope); fall back to `this` for very old engines.
(typeof globalThis != "undefined" ? globalThis : this).$ = getElement;

/**
 * Dispatch a "contentEvent" CustomEvent on the document so the chrome-side
 * listener can act on it.
 *
 * @param type  event name understood by the chrome listener (e.g. "Focus")
 * @param data  optional payload object; serialized to JSON (defaults to {})
 * @param sync  truthy for synchronous handling, falsy for async
 */
function contentDispatchEvent(type, data, sync) {
    if (typeof(data) == "undefined") {
        data = {};
    }

    var event = new CustomEvent("contentEvent", {
        bubbles: true,
        detail: {
            "sync": sync,
            "type": type,
            "data": JSON.stringify(data)
        }
    });
    document.dispatchEvent(event);
}

/** Asynchronous flavor of contentDispatchEvent (sync flag = 0). */
function contentAsyncEvent(type, data) {
    contentDispatchEvent(type, data, 0);
}

/* Helper Function */
/**
 * Copy the elements of an array-like object into a real array, starting at
 * index `skip` (default 0).
 *
 * Fix: previously `ret` was only initialized inside `if (obj)`, so a falsy
 * `obj` made the function return `undefined`; it now always returns an array.
 */
function extend(obj, /* optional */ skip) {
    var ret = [];
    if (obj) {
        var l = obj.length;
        for (var i = (skip || 0); i < l; i++) {
            ret.push(obj[i]);
        }
    }
    return ret;
}

/**
 * Flatten the argument list: any array-like argument is expanded in place
 * (recursively), scalars are kept as-is. Returns a flat array.
 */
function flattenArguments(lst/* ...*/) {
    var res = [];
    var args = extend(arguments);
    while (args.length) {
        var o = args.shift();
        if (o && typeof(o) == "object" && typeof(o.length) == "number") {
            // Push children back onto the work queue, preserving order.
            for (var i = o.length - 1; i >= 0; i--) {
                args.unshift(o[i]);
            }
        } else {
            res.push(o);
        }
    }
    return res;
}
/**
 * TestRunner: A test runner for SimpleTest
 * TODO:
 *
 *  * Avoid moving iframes: That causes reloads on mozilla and opera.
 *
 **/
var TestRunner = {
    // When true, output goes through TestRunner.logger; otherwise dump().
    logEnabled: false,

    // Index into _urls of the test currently running.
    _currentTest: 0,
    // Index of the last test that called finish(); guards double-finish.
    _lastTestFinished: -1,
    _loopIsRestarting: false,
    currentTestURL: "",
    originalTestURL: "",
    // The list of test URLs to run.
    _urls: [],

    // Assertion-count bookkeeping (used on debug builds in testUnloaded).
    _lastAssertionCount: 0,
    _expectedMinAsserts: 0,
    _expectedMaxAsserts: 0,

    timeout: 5 * 60 * 1000, // 5 minutes.
    maxTimeouts: 4, // halt testing after too many timeouts
    runSlower: false,
    dumpOutputDirectory: "",
    dumpAboutMemoryAfterTest: false,
    dumpDMDAfterTest: false,
    quiet: false,
    slowestTestTime: 0,
    slowestTestURL: "",

    _expectingProcessCrash: false
};
/**
 * Make sure the tests don't hang indefinitely.
 **/
TestRunner._numTimeouts = 0;
TestRunner._currentTestStartTime = new Date().valueOf();
TestRunner._timeoutFactor = 1;

// Polls every 30s while tests remain; if the current test has exceeded
// timeout * _timeoutFactor, logs a failure and schedules the test's death.
TestRunner._checkForHangs = function() {
    // Record a failure in whichever harness the frame window is running.
    function reportError(win, msg) {
        if ("SimpleTest" in win) {
            win.SimpleTest.ok(false, msg);
        } else if ("W3CTest" in win) {
            win.W3CTest.logFailure(msg);
        }
    }

    // Force the current test to wind down.
    function killTest(win) {
        if ("SimpleTest" in win) {
            win.SimpleTest.finish();
        } else if ("W3CTest" in win) {
            win.W3CTest.timeout();
        }
    }

    if (TestRunner._currentTest >= TestRunner._urls.length) {
        // All tests done; stop polling.
        return;
    }

    var elapsed = new Date().valueOf() - TestRunner._currentTestStartTime;
    if (elapsed >= TestRunner.timeout * TestRunner._timeoutFactor) {
        var frame = $('testframe');
        var testWin = frame.contentWindow.wrappedJSObject ||
                      frame.contentWindow;
        reportError(testWin, "Test timed out.");

        // If we have too many timeouts, give up. We don't want to wait hours
        // for results if some bug causes lots of tests to time out.
        if (++TestRunner._numTimeouts >= TestRunner.maxTimeouts) {
            TestRunner._haltTests = true;

            TestRunner.currentTestURL = "(SimpleTest/TestRunner.js)";
            reportError(testWin, TestRunner.maxTimeouts + " test timeouts, giving up.");
            var remaining = TestRunner._urls.length - TestRunner._currentTest;
            reportError(testWin, "Skipping " + remaining + " remaining tests.");
        }

        // Add a little (1 second) delay to ensure automation.py has time to
        // notice the "Test timed out" log and process it (= take a screenshot).
        setTimeout(function delayedKillTest() { killTest(testWin); }, 1000);

        if (TestRunner._haltTests) {
            return;
        }
    }

    setTimeout(TestRunner._checkForHangs, 30000);
}

// Scale the per-test timeout by `factor` (reset to 1 for each new test).
TestRunner.requestLongerTimeout = function(factor) {
    TestRunner._timeoutFactor = factor;
}

/**
 * This is used to loop tests
 **/
TestRunner.repeat = 0;
TestRunner._currentLoop = 1;

// Declare the expected assertion-count window for the current test.
// `max` defaults to `min`; both must be numbers with 0 <= min <= max.
TestRunner.expectAssertions = function(min, max) {
    if (typeof(max) == "undefined") {
        max = min;
    }
    if (typeof(min) != "number" || typeof(max) != "number" ||
        min < 0 || max < min) {
        throw "bad parameter to expectAssertions";
    }
    TestRunner._expectedMinAsserts = min;
    TestRunner._expectedMaxAsserts = max;
}
/**
 * This function is called after generating the summary.
 **/
TestRunner.onComplete = null;

/**
 * Adds a failed test case to a list so we can rerun only the failed tests
 **/
TestRunner._failedTests = {};
TestRunner._failureFile = "";

// Record `testName` in the failed-test set (value is an empty placeholder).
TestRunner.addFailedTest = function(testName) {
    if (TestRunner._failedTests[testName] == undefined) {
        TestRunner._failedTests[testName] = "";
    }
};

// Set the path of the file the failure list is written to.
TestRunner.setFailureFile = function(fileName) {
    TestRunner._failureFile = fileName;
}

// Write the accumulated failed-test set (as JSON) to the failure file, if one
// was configured via setFailureFile().
TestRunner.generateFailureList = function () {
    if (TestRunner._failureFile) {
        var failures = new SpecialPowersLogger(TestRunner._failureFile);
        failures.log(JSON.stringify(TestRunner._failedTests));
        failures.close();
    }
};

/**
 * If logEnabled is true, this is the logger that will be used.
 **/
TestRunner.logger = LogController;

// Log an informational message, through the logger or straight to dump().
TestRunner.log = function(msg) {
    if (TestRunner.logEnabled) {
        TestRunner.logger.log(msg);
    } else {
        dump(msg + "\n");
    }
};

// Log an error; may also halt the run (runUntilFailure) or break into the
// debugger (debugOnFailure) depending on harness flags.
TestRunner.error = function(msg) {
    if (TestRunner.logEnabled) {
        TestRunner.logger.error(msg);
    } else {
        dump(msg + "\n");
    }

    if (TestRunner.runUntilFailure) {
        TestRunner._haltTests = true;
    }

    if (TestRunner.debugOnFailure) {
        // You've hit this line because you requested to break into the
        // debugger upon a testcase failure on your test run.
        debugger;
    }
};

/**
 * Toggle element visibility
 **/
TestRunner._toggle = function(el) {
    if (el.className == "noshow") {
        el.className = "";
        el.style.cssText = "";
    } else {
        el.className = "noshow";
        el.style.cssText = "width:0px; height:0px; border:0px;";
    }
};

/**
 * Creates the iframe that contains a test
 **/
TestRunner._makeIframe = function (url, retry) {
    var iframe = $('testframe');
    if (url != "about:blank" &&
        (("hasFocus" in document && !document.hasFocus()) ||
         ("activeElement" in document && document.activeElement != iframe))) {

        contentAsyncEvent("Focus");
        window.focus();
        SpecialPowers.focus();
        iframe.focus();
        if (retry < 3) {
            // Fix: retry via a closure rather than a string argument to
            // setTimeout -- the string form is an implicit eval and would
            // break on URLs containing quote characters.
            window.setTimeout(function () {
                TestRunner._makeIframe(url, retry + 1);
            }, 1000);
            return;
        }

        TestRunner.log("Error: Unable to restore focus, expect failures and timeouts.");
    }
    window.scrollTo(0, $('indicator').offsetTop);
    iframe.src = url;
    iframe.name = url;
    iframe.width = "500";
    return iframe;
};
/**
 * Returns the current test URL.
 * We use this to tell whether the test has navigated to another test without
 * being finished first.
 */
TestRunner.getLoadedTestURL = function () {
    var win = $('testframe').contentWindow;
    // handle mochitest-chrome URIs
    var prefix = (win.location.protocol == "chrome:") ? "chrome://mochitests"
                                                      : "";
    return prefix + win.location.pathname;
};

/**
 * TestRunner entry point.
 *
 * The arguments are the URLs of the test to be ran.
 *
 **/
TestRunner.runTests = function (/*url...*/) {
    TestRunner.log("SimpleTest START");
    TestRunner.originalTestURL = $("current-test").innerHTML;

    SpecialPowers.registerProcessCrashObservers();

    TestRunner._urls = flattenArguments(arguments);
    $('testframe').src = "";
    TestRunner._checkForHangs();
    TestRunner.runNextTest();
};

/**
 * Used for running a set of tests in a loop for debugging purposes
 * Takes an array of URLs
 **/
TestRunner.resetTests = function(listURLs) {
    TestRunner._currentTest = 0;
    // Reset our "Current-test" line - functionality depends on it
    $("current-test").innerHTML = TestRunner.originalTestURL;
    if (TestRunner.logEnabled) {
        TestRunner.log("SimpleTest START Loop " + TestRunner._currentLoop);
    }

    TestRunner._urls = listURLs;
    $('testframe').src = "";
    TestRunner._checkForHangs();
    TestRunner.runNextTest();
}
/**
 * Run the next test. If no test remains, calls onComplete().
 **/
TestRunner._haltTests = false;
TestRunner.runNextTest = function() {
    if (TestRunner._currentTest < TestRunner._urls.length &&
        !TestRunner._haltTests)
    {
        var url = TestRunner._urls[TestRunner._currentTest];
        TestRunner.currentTestURL = url;

        $("current-test-path").innerHTML = url;

        // Fresh per-test state: timing, timeout scaling, assertion window.
        TestRunner._currentTestStartTime = new Date().valueOf();
        TestRunner._timeoutFactor = 1;
        TestRunner._expectedMinAsserts = 0;
        TestRunner._expectedMaxAsserts = 0;

        TestRunner.log("TEST-START | " + url); // used by automation.py

        TestRunner._makeIframe(url, 0);
    } else {
        $("current-test").innerHTML = "Finished";
        TestRunner._makeIframe("about:blank", 0);

        // Fix: always pass a radix to parseInt.
        if (parseInt($("pass-count").innerHTML, 10) == 0 &&
            parseInt($("fail-count").innerHTML, 10) == 0 &&
            parseInt($("todo-count").innerHTML, 10) == 0)
        {
            // No |$('testframe').contentWindow|, so manually update: ...
            // ... the log,
            TestRunner.error("TEST-UNEXPECTED-FAIL | (SimpleTest/TestRunner.js) | No checks actually run.");
            // ... the count,
            $("fail-count").innerHTML = 1;
            // ... the indicator.
            var indicator = $("indicator");
            indicator.innerHTML = "Status: Fail (No checks actually run)";
            indicator.style.backgroundColor = "red";
        }

        SpecialPowers.unregisterProcessCrashObservers();

        TestRunner.log("TEST-START | Shutdown"); // used by automation.py
        TestRunner.log("Passed: " + $("pass-count").innerHTML);
        TestRunner.log("Failed: " + $("fail-count").innerHTML);
        TestRunner.log("Todo: " + $("todo-count").innerHTML);
        TestRunner.log("Slowest: " + TestRunner.slowestTestTime + 'ms - ' + TestRunner.slowestTestURL);
        // If we are looping, don't send this cause it closes the log file
        if (TestRunner.repeat == 0) {
            TestRunner.log("SimpleTest FINISHED");
        }

        if (TestRunner.repeat == 0 && TestRunner.onComplete) {
            TestRunner.onComplete();
        }

        if (TestRunner._currentLoop <= TestRunner.repeat && !TestRunner._haltTests) {
            TestRunner._currentLoop++;
            TestRunner.resetTests(TestRunner._urls);
            TestRunner._loopIsRestarting = true;
        } else {
            // Loops are finished
            if (TestRunner.logEnabled) {
                TestRunner.log("TEST-INFO | Ran " + TestRunner._currentLoop + " Loops");
                TestRunner.log("SimpleTest FINISHED");
            }

            // NOTE(review): when repeat == 0, onComplete was already invoked
            // above, so this calls it a second time -- confirm whether
            // automation relies on the double call before changing it.
            if (TestRunner.onComplete)
                TestRunner.onComplete();
        }
        TestRunner.generateFailureList();
    }
};

// Tell the crash-dump cleanup code that the next test expects a child
// process crash (so its dumps are not treated as unexpected).
TestRunner.expectChildProcessCrash = function() {
    TestRunner._expectingProcessCrash = true;
};
/**
 * This stub is called by SimpleTest when a test is finished.
 **/
TestRunner.testFinished = function(tests) {
    // Prevent a test from calling finish() multiple times before we
    // have a chance to unload it.
    if (TestRunner._currentTest == TestRunner._lastTestFinished &&
        !TestRunner._loopIsRestarting) {
        TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                         TestRunner.currentTestURL +
                         " | called finish() multiple times");
        TestRunner.updateUI([{ result: false }]);
        return;
    }
    TestRunner._lastTestFinished = TestRunner._currentTest;
    TestRunner._loopIsRestarting = false;

    MemoryStats.dump(TestRunner.log, TestRunner._currentTest,
                     TestRunner.currentTestURL,
                     TestRunner.dumpOutputDirectory,
                     TestRunner.dumpAboutMemoryAfterTest,
                     TestRunner.dumpDMDAfterTest);

    // Fail the test when expected crash dumps are missing or unexpected
    // ones are present, then reset the expectation flag.
    function cleanUpCrashDumpFiles() {
        if (!SpecialPowers.removeExpectedCrashDumpFiles(TestRunner._expectingProcessCrash)) {
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | This test did not leave any crash dumps behind, but we were expecting some!");
            tests.push({ result: false });
        }

        var strayDumps = SpecialPowers.findUnexpectedCrashDumpFiles();
        TestRunner._expectingProcessCrash = false;
        if (strayDumps.length) {
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | This test left crash dumps behind, but we " +
                             "weren't expecting it to!");
            tests.push({ result: false });
            strayDumps.sort().forEach(function(aFilename) {
                TestRunner.log("TEST-INFO | Found unexpected crash dump file " +
                               aFilename + ".");
            });
        }
    }

    // Log TEST-END with timing, track the slowest test, update the UI, then
    // load the interstitial page before the next test starts.
    function runNextTest() {
        var loadedURL = TestRunner.getLoadedTestURL();
        if (TestRunner.currentTestURL != loadedURL) {
            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
                             TestRunner.currentTestURL +
                             " | " + loadedURL +
                             " finished in a non-clean fashion, probably" +
                             " because it didn't call SimpleTest.finish()");
            tests.push({ result: false });
        }

        var runtime = new Date().valueOf() - TestRunner._currentTestStartTime;
        TestRunner.log("TEST-END | " +
                       TestRunner.currentTestURL +
                       " | finished in " + runtime + "ms");
        if (TestRunner.slowestTestTime < runtime && TestRunner._timeoutFactor == 1) {
            TestRunner.slowestTestTime = runtime;
            TestRunner.slowestTestURL = TestRunner.currentTestURL;
        }

        TestRunner.updateUI(tests);

        var isChromeTest =
            $('testframe').contentWindow.location.protocol == "chrome:";
        var interstitialURL = isChromeTest
            ? "tests/SimpleTest/iframe-between-tests.html"
            : "/tests/SimpleTest/iframe-between-tests.html";
        TestRunner._makeIframe(interstitialURL, 0);
    }

    SpecialPowers.executeAfterFlushingMessageQueue(function() {
        cleanUpCrashDumpFiles();
        SpecialPowers.flushAllAppsLaunchable();
        SpecialPowers.flushPermissions(function () { SpecialPowers.flushPrefEnv(runNextTest); });
    });
};

TestRunner.testUnloaded = function() {
    // If we're in a debug build, check assertion counts. This code is
    // similar to the code in Tester_nextTest in browser-test.js used
    // for browser-chrome mochitests.
    if (SpecialPowers.isDebugBuild) {
        var newAssertionCount = SpecialPowers.assertionCount();
        var numAsserts = newAssertionCount - TestRunner._lastAssertionCount;
        TestRunner._lastAssertionCount = newAssertionCount;

        var url = TestRunner._urls[TestRunner._currentTest];
        var max = TestRunner._expectedMaxAsserts;
        var min = TestRunner._expectedMinAsserts;
        if (numAsserts > max) {
            // Too many assertions: unexpected failure.
            TestRunner.error("TEST-UNEXPECTED-FAIL | " + url + " | Assertion count " + numAsserts + " is greater than expected range " + min + "-" + max + " assertions.");
            TestRunner.updateUI([{ result: false }]);
        } else if (numAsserts < min) {
            // Fewer than expected: flagged as an unexpected pass.
            TestRunner.error("TEST-UNEXPECTED-PASS | " + url + " | Assertion count " + numAsserts + " is less than expected range " + min + "-" + max + " assertions.");
            TestRunner.updateUI([{ result: false }]);
        } else if (numAsserts > 0) {
            // Within the declared window: known failure, just log it.
            TestRunner.log("TEST-KNOWN-FAIL | " + url + " | Assertion count " + numAsserts + " within expected range " + min + "-" + max + " assertions.");
        }
    }
    TestRunner._currentTest++;
    if (TestRunner.runSlower) {
        setTimeout(TestRunner.runNextTest, 1000);
    } else {
        TestRunner.runNextTest();
    }
};
/**
 * Get the results.
 * Classifies each entry of `tests` ({result, todo}) into OK / notOK / todo:
 * todo-and-failed counts as todo, passed-and-not-todo counts as OK, and
 * everything else (failures and unexpected todo passes) counts as notOK.
 */
TestRunner.countResults = function(tests) {
    var nOK = 0;
    var nNotOK = 0;
    var nTodo = 0;
    for (var i = 0; i < tests.length; ++i) {
        var test = tests[i];
        if (test.todo && !test.result) {
            nTodo++;
        } else if (test.result && !test.todo) {
            nOK++;
        } else {
            nNotOK++;
        }
    }
    return {"OK": nOK, "notOK": nNotOK, "todo": nTodo};
}

/**
 * Print out table of any error messages found during looped run
 */
TestRunner.displayLoopErrors = function(tableName, tests) {
    if (TestRunner.countResults(tests).notOK > 0) {
        var table = $(tableName);
        // Fix: declare these once at function scope. Previously they were
        // only declared inside the header-creation branch, so on subsequent
        // calls the loop below assigned to implicit globals (which throws in
        // strict mode).
        var row, cell, textNode;
        var curtest;
        if (table.rows.length == 0) {
            // if table headers are not yet generated, make them
            row = table.insertRow(table.rows.length);
            cell = row.insertCell(0);
            textNode = document.createTextNode("Test File Name:");
            cell.appendChild(textNode);
            cell = row.insertCell(1);
            textNode = document.createTextNode("Test:");
            cell.appendChild(textNode);
            cell = row.insertCell(2);
            textNode = document.createTextNode("Error message:");
            cell.appendChild(textNode);
        }

        // find the broken test (indexed loop: for...in over an array is
        // unreliable and was replaced)
        for (var testnum = 0; testnum < tests.length; testnum++) {
            curtest = tests[testnum];
            if (!((curtest.todo && !curtest.result) || (curtest.result && !curtest.todo))) {
                // this is a failed test or the result of todo test. Display
                // the related message
                row = table.insertRow(table.rows.length);
                cell = row.insertCell(0);
                textNode = document.createTextNode(TestRunner.currentTestURL);
                cell.appendChild(textNode);
                cell = row.insertCell(1);
                textNode = document.createTextNode(curtest.name);
                cell.appendChild(textNode);
                cell = row.insertCell(2);
                textNode = document.createTextNode((curtest.diag ? curtest.diag : ""));
                cell.appendChild(textNode);
            }
        }
    }
}

/**
 * Fold the results of one test into the harness UI: the pass/fail/todo
 * counters, the colored status indicator, and (for autoUI) the per-test row.
 */
TestRunner.updateUI = function(tests) {
    var results = TestRunner.countResults(tests);
    // Fix: always pass a radix to parseInt.
    var passCount = parseInt($("pass-count").innerHTML, 10) + results.OK;
    var failCount = parseInt($("fail-count").innerHTML, 10) + results.notOK;
    var todoCount = parseInt($("todo-count").innerHTML, 10) + results.todo;
    $("pass-count").innerHTML = passCount;
    $("fail-count").innerHTML = failCount;
    $("todo-count").innerHTML = todoCount;

    // Set the top Green/Red bar
    var indicator = $("indicator");
    if (failCount > 0) {
        indicator.innerHTML = "Status: Fail";
        indicator.style.backgroundColor = "red";
    } else if (passCount > 0) {
        indicator.innerHTML = "Status: Pass";
        indicator.style.backgroundColor = "#0d0";
    } else {
        indicator.innerHTML = "Status: ToDo";
        indicator.style.backgroundColor = "orange";
    }

    // Set the table values
    var trID = "tr-" + $('current-test-path').innerHTML;
    var row = $(trID);

    // Only update the row if it actually exists (autoUI)
    if (row != null) {
        var tds = row.getElementsByTagName("td");
        tds[0].style.backgroundColor = "#0d0";
        tds[0].innerHTML = parseInt(tds[0].innerHTML, 10) + results.OK;
        tds[1].style.backgroundColor = results.notOK > 0 ? "red" : "#0d0";
        tds[1].innerHTML = parseInt(tds[1].innerHTML, 10) + results.notOK;
        tds[2].style.backgroundColor = results.todo > 0 ? "orange" : "#0d0";
        tds[2].innerHTML = parseInt(tds[2].innerHTML, 10) + results.todo;
    }

    // if we ran in a loop, display any found errors
    if (TestRunner.repeat > 0) {
        TestRunner.displayLoopErrors('fail-table', tests);
    }
}