# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import logging
import re
import os

import constants
from perf_tests_helper import PrintPerfResult
from pylib import pexpect
from test_result import BaseTestResult, TestResults


# TODO(bulach): TestPackage, TestPackageExecutable and
# TestPackageApk are a work in progress related to making the native tests
# run as a NDK-app from an APK rather than a stand-alone executable.
class TestPackage(object):
  """A helper base class for both APK and stand-alone executables.

  Subclasses are expected to provide _GetTestSuiteBaseName() and
  _GetGTestReturnCode(), which are called here but defined elsewhere.

  Args:
    adb: ADB interface the tests are using.
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    timeout: Timeout for each test.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    performance_test: Whether or not performance test(s).
    cleanup_test_files: Whether or not to cleanup test files on device.
    tool: Name of the Valgrind tool.
    dump_debug_info: A debug_info object.
  """

  def __init__(self, adb, device, test_suite, timeout, rebaseline,
               performance_test, cleanup_test_files, tool, dump_debug_info):
    self.adb = adb
    self.device = device
    self.test_suite_full = test_suite
    # Strip the extension (e.g. an .apk suffix) to get the suite path.
    self.test_suite = os.path.splitext(test_suite)[0]
    # Provided by the concrete subclass (APK vs stand-alone executable).
    self.test_suite_basename = self._GetTestSuiteBaseName()
    self.test_suite_dirname = os.path.dirname(
        self.test_suite.split(self.test_suite_basename)[0])
    self.rebaseline = rebaseline
    self.performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = tool
    if timeout == 0:
      timeout = 60
    # On a VM (e.g. chromium buildbots), this timeout is way too small.
    if os.environ.get('BUILDBOT_SLAVENAME'):
      timeout = timeout * 2
    # Valgrind-style tools slow execution down; scale the timeout accordingly.
    self.timeout = timeout * self.tool.GetTimeoutScale()
    self.dump_debug_info = dump_debug_info

  def _BeginGetIOStats(self):
    """Gets I/O statistics before running test.

    Return:
      I/O stats object. The I/O stats object may be None if the test is not
      a performance test.
    """
    initial_io_stats = None
    # Try to get the disk I/O statistics for all performance tests.
    if self.performance_test and not self.rebaseline:
      initial_io_stats = self.adb.GetIoStats()
    return initial_io_stats

  def _EndGetIOStats(self, initial_io_stats):
    """Gets I/O statistics after running test and calculates the I/O delta.

    Args:
      initial_io_stats: I/O stats object got from _BeginGetIOStats.

    Return:
      String of formatted disk I/O statistics (empty string when the test is
      not a performance test or no initial stats were collected).
    """
    disk_io = ''
    if self.performance_test and initial_io_stats:
      final_io_stats = self.adb.GetIoStats()
      for stat in final_io_stats:
        disk_io += '\n' + PrintPerfResult(stat, stat,
                                          [final_io_stats[stat] -
                                           initial_io_stats[stat]],
                                          stat.split('_')[1],
                                          print_to_stdout=False)
      logging.info(disk_io)
    return disk_io

  def GetDisabledPrefixes(self):
    """Returns the list of test-name prefixes that mark a test as disabled."""
    return ['DISABLED_', 'FLAKY_', 'FAILS_']

  def _ParseGTestListTests(self, all_tests):
    """Parses the raw output of --gtest_list_tests into full test names.

    gtest prints a fixture line ending with '.' followed by two-space
    indented test names; this joins them into 'Fixture.Test' strings,
    skipping disabled tests and any trailing 'YOU HAVE ... DISABLED' note.

    Args:
      all_tests: List of output lines from the gtest binary.

    Return:
      List of fully-qualified, enabled test names.
    """
    ret = []
    current = ''
    disabled_prefixes = tuple(self.GetDisabledPrefixes())
    for test in all_tests:
      if not test:
        continue
      if test[0] != ' ' and not test.endswith('.'):
        # Ignore any lines with unexpected format.
        continue
      if test[0] != ' ' and test.endswith('.'):
        current = test
        continue
      if 'YOU HAVE' in test:
        break
      test_name = test[2:]
      if not test_name.startswith(disabled_prefixes):
        ret += [current + test_name]
    return ret

  def PushDataAndPakFiles(self):
    """Pushes the .pak resource files and test data needed by some suites."""
    external_storage = self.adb.GetExternalStorage()
    if (self.test_suite_basename == 'ui_unittests' or
        self.test_suite_basename == 'unit_tests'):
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/chrome.pak',
          external_storage + '/paks/chrome.pak')
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/locales/en-US.pak',
          external_storage + '/paks/en-US.pak')
    if self.test_suite_basename == 'unit_tests':
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/resources.pak',
          external_storage + '/paks/resources.pak')
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/chrome_100_percent.pak',
          external_storage + '/paks/chrome_100_percent.pak')
      self.adb.PushIfNeeded(self.test_suite_dirname + '/test_data',
                            external_storage + '/test_data')
    if self.test_suite_basename == 'content_unittests':
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/content_resources.pak',
          external_storage + '/paks/content_resources.pak')

  def _WatchTestOutput(self, p):
    """Watches the test output.

    Args:
      p: the process generating output as created by pexpect.spawn.

    Return:
      A TestResults object summarizing passed/failed/crashed tests,
      timeouts and overall runner failure.
    """
    ok_tests = []
    failed_tests = []
    crashed_tests = []
    timed_out = False
    overall_fail = False
    re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
    # APK tests rely on the PASSED tag.
    re_passed = re.compile(r'\[  PASSED  \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile(r'\[ CRASHED      \](.*)\r\n')
    re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
    re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
    re_ok = re.compile(r'\[       OK \] ?(.*?) .*\r\n')
    io_stats_before = self._BeginGetIOStats()
    try:
      while True:
        found = p.expect([re_run, re_passed, re_runner_fail],
                         timeout=self.timeout)
        if found == 1:  # matched PASSED.
          break
        if found == 2:  # RUNNER_FAILED
          logging.error('RUNNER_FAILED')
          overall_fail = True
          break
        if self.dump_debug_info:
          self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
        full_test_name = p.match.group(1).replace('\r', '')
        found = p.expect([re_ok, re_fail, re_crash], timeout=self.timeout)
        if found == 0:  # re_ok
          # Only count the test as OK when the name on the OK line matches
          # the one announced by RUN.
          if full_test_name == p.match.group(1).replace('\r', ''):
            ok_tests += [BaseTestResult(full_test_name, p.before)]
          continue
        if found == 2:  # re_crash
          crashed_tests += [BaseTestResult(full_test_name, p.before)]
          overall_fail = True
          break
        # The test failed.
        failed_tests += [BaseTestResult(full_test_name, p.before)]
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self.timeout)
      timed_out = True
    finally:
      p.close()
      if not self.rebaseline:
        # _EndGetIOStats returns a string; extending ok_tests with it would
        # add one list entry per character. Wrap the non-empty stats string
        # in a BaseTestResult so the list stays homogeneous.
        disk_io = self._EndGetIOStats(io_stats_before)
        if disk_io:
          ok_tests += [BaseTestResult('disk_io', disk_io)]
        ret_code = self._GetGTestReturnCode()
        if ret_code:
          failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code,
                                          'pexpect.before: %s'
                                          '\npexpect.after: %s'
                                          % (p.before,
                                             p.after))]
    # Create TestResults and return
    return TestResults.FromRun(ok=ok_tests, failed=failed_tests,
                               crashed=crashed_tests, timed_out=timed_out,
                               overall_fail=overall_fail)