#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all the native unit tests.

1. Copy the test binary to /data/local on the device.
2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
   to be deployed to the device. We use the device's $EXTERNAL_STORAGE as the
   base dir (which maps to Context.getExternalFilesDir()).
3. Environment:
3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named:
     $EXTERNAL_STORAGE + /chrome/test/data
3.2. page_cycler_tests have the following requirements:
3.2.1. the following data on the host:
       /tools/page_cycler
       /data/page_cycler
3.2.2. two data directories on the device to store the above test data, named:
       $EXTERNAL_STORAGE + /tools/ (for the database perf test)
       $EXTERNAL_STORAGE + /data/ (for other perf tests)
3.2.3. an http server to serve the http perf tests.
       The http root is the host's /data/page_cycler/, port 8000.
3.2.4. a tool named forwarder, which must run on the device to forward the
       http requests/responses between host and device.
3.2.5. Chrome installed on the device.
4. Run the binary on the device and stream the log to the host.
4.1. Optionally, filter specific tests.
4.2. Optionally, rebaseline: run the available tests and update the
     suppressions file for failures.
4.3. If we're running a single test suite and we have multiple devices
     connected, we'll shard the tests.
5. Clean up the device.

Suppressions:

Individual tests in a test binary can be suppressed by listing them in
the gtest_filter directory, in a file named after the test binary,
one test per line. Here is an example:

  $ cat gtest_filter/base_unittests_disabled
  DataPackTest.Load
  ReadOnlyFileUtilTest.ContentsEqual

This file is generated by the tests running on devices. If running on an
emulator, an additional filter file listing the tests that fail only on the
emulator is loaded. We do not care about the rare test cases that succeed on
the emulator but fail on a device.
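
A typical invocation might look like the following (the path to this script
depends on the checkout layout and is shown only as an illustration; the -s,
-e and -n flags are defined in main() below):

  $ build/android/run_tests.py -s base_unittests -e -n 2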
michael@0: """ michael@0: michael@0: import fnmatch michael@0: import logging michael@0: import optparse michael@0: import os michael@0: import signal michael@0: import subprocess michael@0: import sys michael@0: import time michael@0: michael@0: from pylib import android_commands michael@0: from pylib.base_test_sharder import BaseTestSharder michael@0: from pylib import buildbot_report michael@0: from pylib import constants michael@0: from pylib import debug_info michael@0: import emulator michael@0: from pylib import ports michael@0: from pylib import run_tests_helper michael@0: from pylib import test_options_parser michael@0: from pylib.single_test_runner import SingleTestRunner michael@0: from pylib.test_result import BaseTestResult, TestResults michael@0: michael@0: michael@0: _TEST_SUITES = ['base_unittests', michael@0: 'content_unittests', michael@0: 'gpu_unittests', michael@0: 'ipc_tests', michael@0: 'media_unittests', michael@0: 'net_unittests', michael@0: 'sql_unittests', michael@0: 'sync_unit_tests', michael@0: 'ui_unittests', michael@0: 'unit_tests', michael@0: ] michael@0: michael@0: michael@0: def TestSuiteDir(build_type): michael@0: """Return the base directory of test suites.""" michael@0: return os.path.abspath(os.path.join(constants.CHROME_DIR, 'out', build_type)) michael@0: michael@0: def FullyQualifiedTestSuites(exe, option_test_suite, build_type): michael@0: """Return a fully qualified list michael@0: michael@0: Args: michael@0: exe: if True, use the executable-based test runner. michael@0: option_test_suite: the test_suite specified as an option. michael@0: build_type: 'Release' or 'Debug'. michael@0: """ michael@0: test_suite_dir = TestSuiteDir(build_type) michael@0: if option_test_suite: michael@0: all_test_suites = [option_test_suite] michael@0: else: michael@0: all_test_suites = _TEST_SUITES michael@0: michael@0: if exe: michael@0: qualified_test_suites = [os.path.join(test_suite_dir, t) michael@0: for t in all_test_suites] michael@0: else: michael@0: # out/(Debug|Release)/$SUITE_apk/$SUITE-debug.apk michael@0: qualified_test_suites = [os.path.join(test_suite_dir, michael@0: t + '_apk', michael@0: t + '-debug.apk') michael@0: for t in all_test_suites] michael@0: for t, q in zip(all_test_suites, qualified_test_suites): michael@0: if not os.path.exists(q): michael@0: logging.critical('Test suite %s not found in %s.\n' michael@0: 'Supported test suites:\n %s\n' michael@0: 'Ensure it has been built.\n', michael@0: t, q, _TEST_SUITES) michael@0: return [] michael@0: return qualified_test_suites michael@0: michael@0: michael@0: class TimeProfile(object): michael@0: """Class for simple profiling of action, with logging of cost.""" michael@0: michael@0: def __init__(self, description): michael@0: self._description = description michael@0: self.Start() michael@0: michael@0: def Start(self): michael@0: self._starttime = time.time() michael@0: michael@0: def Stop(self): michael@0: """Stop profiling and dump a log.""" michael@0: if self._starttime: michael@0: stoptime = time.time() michael@0: logging.info('%fsec to perform %s', michael@0: stoptime - self._starttime, self._description) michael@0: self._starttime = None michael@0: michael@0: michael@0: class Xvfb(object): michael@0: """Class to start and stop Xvfb if relevant. 

  def __init__(self):
    self._pid = 0

  def _IsLinux(self):
    """Return True if on Linux; else False."""
    return sys.platform.startswith('linux')

  def Start(self):
    """Start Xvfb and set an appropriate DISPLAY environment. Linux only.

    Copied from tools/code_coverage/coverage_posix.py
    """
    if not self._IsLinux():
      return
    proc = subprocess.Popen(['Xvfb', ':9', '-screen', '0', '1024x768x24',
                             '-ac'],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    self._pid = proc.pid
    if not self._pid:
      raise Exception('Could not start Xvfb')
    os.environ['DISPLAY'] = ':9'

    # Now confirm, giving it a chance to start if needed.
    for _ in range(10):
      proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
      _, retcode = os.waitpid(proc.pid, 0)
      if retcode == 0:
        break
      time.sleep(0.25)
    if retcode != 0:
      raise Exception('Could not confirm Xvfb happiness')

  def Stop(self):
    """Stop Xvfb if needed. Linux only."""
    if self._pid:
      try:
        os.kill(self._pid, signal.SIGKILL)
      except:
        pass
      del os.environ['DISPLAY']
      self._pid = 0


class TestSharder(BaseTestSharder):
  """Responsible for sharding the tests on the connected devices."""

  def __init__(self, attached_devices, test_suite, gtest_filter,
               test_arguments, timeout, rebaseline, performance_test,
               cleanup_test_files, tool, log_dump_name, fast_and_loose,
               build_type):
    BaseTestSharder.__init__(self, attached_devices)
    self.test_suite = test_suite
    self.test_suite_basename = os.path.basename(test_suite)
    self.gtest_filter = gtest_filter or ''
    self.test_arguments = test_arguments
    self.timeout = timeout
    self.rebaseline = rebaseline
    self.performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = tool
    self.log_dump_name = log_dump_name
    self.fast_and_loose = fast_and_loose
    self.build_type = build_type
    test = SingleTestRunner(self.attached_devices[0], test_suite, gtest_filter,
                            test_arguments, timeout, rebaseline,
                            performance_test, cleanup_test_files, tool, 0,
                            not not self.log_dump_name, fast_and_loose,
                            build_type)
    self.tests = []
    if not self.gtest_filter:
      # No filter has been specified, so add all tests.
      # The executable/apk needs to be copied before we can call GetAllTests.
      test.test_package.StripAndCopyExecutable()
      all_tests = test.test_package.GetAllTests()
      if not rebaseline:
        disabled_list = test.GetDisabledTests()
        # Only include tests that do not match any pattern in the disabled
        # list.
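        # Illustrative example (not taken from the real filter files): an
        # entry such as 'DataPackTest.*' would be treated as an fnmatch
        # pattern here and exclude every test in the DataPackTest case.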
        all_tests = filter(lambda t:
                           not any([fnmatch.fnmatch(t, disabled_pattern)
                                    for disabled_pattern in disabled_list]),
                           all_tests)
      self.tests = all_tests

  def CreateShardedTestRunner(self, device, index):
    """Creates a suite-specific test runner.

    Args:
      device: Device serial where this shard will run.
      index: Index of this device in the pool.

    Returns:
      A SingleTestRunner object.
    """
    device_num = len(self.attached_devices)
    shard_size = (len(self.tests) + device_num - 1) / device_num
    shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size]
    test_filter = ':'.join(shard_test_list) + self.gtest_filter
    return SingleTestRunner(device, self.test_suite,
                            test_filter, self.test_arguments, self.timeout,
                            self.rebaseline, self.performance_test,
                            self.cleanup_test_files, self.tool, index,
                            not not self.log_dump_name, self.fast_and_loose,
                            self.build_type)

  def OnTestsCompleted(self, test_runners, test_results):
    """Notifies that we completed the tests."""
    test_results.LogFull('Unit test', os.path.basename(self.test_suite),
                         self.build_type)
    test_results.PrintAnnotation()
    if test_results.failed and self.rebaseline:
      test_runners[0].UpdateFilter(test_results.failed)
    if self.log_dump_name:
      # Zip all debug info outputs into a file named by log_dump_name.
      debug_info.GTestDebugInfo.ZipAndCleanResults(
          os.path.join(TestSuiteDir(self.build_type), 'debug_info_dumps'),
          self.log_dump_name)


def _RunATestSuite(options):
  """Run a single test suite.

  Helper for Dispatch() to allow stop/restart of the emulator across
  test bundles. If using the emulator, we start it on entry and stop
  it on exit.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
  buildbot_report.PrintNamedStep(step_name)
  attached_devices = []
  buildbot_emulators = []

  if options.use_emulator:
    for n in range(options.emulator_count):
      t = TimeProfile('Emulator launch %d' % n)
      avd_name = None
      if n > 0:
        # Creates a temporary AVD for the extra emulators.
        avd_name = 'run_tests_avd_%d' % n
      buildbot_emulator = emulator.Emulator(avd_name, options.fast_and_loose)
      buildbot_emulator.Launch(kill_all_emulators=n == 0)
      t.Stop()
      buildbot_emulators.append(buildbot_emulator)
      attached_devices.append(buildbot_emulator.device)
    # Wait for all emulators to finish booting.
    map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True),
        buildbot_emulators)
  elif options.test_device:
    attached_devices = [options.test_device]
  else:
    attached_devices = android_commands.GetAttachedDevices()

  if not attached_devices:
    logging.critical('A device must be attached and online.')
    buildbot_report.PrintError()
    return 1

  # Reset the test port allocation. It's important to do this before starting
  # to dispatch any tests.
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if options.performance_test or options.gtest_filter:
    # These configurations can't be split across multiple devices.
    attached_devices = [attached_devices[0]]
  sharder = TestSharder(attached_devices, options.test_suite,
                        options.gtest_filter, options.test_arguments,
                        options.timeout, options.rebaseline,
                        options.performance_test,
                        options.cleanup_test_files, options.tool,
                        options.log_dump, options.fast_and_loose,
                        options.build_type)
  test_results = sharder.RunShardedTests()

  for buildbot_emulator in buildbot_emulators:
    buildbot_emulator.Shutdown()

  # Another chance if we timed out? At this point it is safe(r) to
  # run fast and loose since we just uploaded all the test data and
  # the binary.
  if test_results.timed_out and options.repeat:
    logging.critical('Timed out; repeating in fast_and_loose mode.')
    options.fast_and_loose = True
    options.repeat -= 1
    logging.critical('Repeats left: ' + str(options.repeat))
    return _RunATestSuite(options)
  return len(test_results.failed)


def Dispatch(options):
  """Dispatches the tests, sharding if possible.

  If options.use_emulator is True, all tests will be run in a new emulator
  instance.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
michael@0: """ michael@0: if options.test_suite == 'help': michael@0: ListTestSuites() michael@0: return 0 michael@0: michael@0: if options.use_xvfb: michael@0: xvfb = Xvfb() michael@0: xvfb.Start() michael@0: michael@0: all_test_suites = FullyQualifiedTestSuites(options.exe, options.test_suite, michael@0: options.build_type) michael@0: failures = 0 michael@0: for suite in all_test_suites: michael@0: options.test_suite = suite michael@0: failures += _RunATestSuite(options) michael@0: michael@0: if options.use_xvfb: michael@0: xvfb.Stop() michael@0: return failures michael@0: michael@0: michael@0: def ListTestSuites(): michael@0: """Display a list of available test suites.""" michael@0: print 'Available test suites are:' michael@0: for test_suite in _TEST_SUITES: michael@0: print test_suite michael@0: michael@0: michael@0: def main(argv): michael@0: option_parser = optparse.OptionParser() michael@0: test_options_parser.AddTestRunnerOptions(option_parser, default_timeout=0) michael@0: option_parser.add_option('-s', '--suite', dest='test_suite', michael@0: help='Executable name of the test suite to run ' michael@0: '(use -s help to list them)') michael@0: option_parser.add_option('-d', '--device', dest='test_device', michael@0: help='Target device the test suite to run ') michael@0: option_parser.add_option('-r', dest='rebaseline', michael@0: help='Rebaseline and update *testsuite_disabled', michael@0: action='store_true') michael@0: option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter', michael@0: help='gtest filter') michael@0: option_parser.add_option('-a', '--test_arguments', dest='test_arguments', michael@0: help='Additional arguments to pass to the test') michael@0: option_parser.add_option('-p', dest='performance_test', michael@0: help='Indicator of performance test', michael@0: action='store_true') michael@0: option_parser.add_option('-L', dest='log_dump', michael@0: help='file name of log dump, which will be put in ' michael@0: 'subfolder debug_info_dumps under the same ' michael@0: 'directory in where the test_suite exists.') michael@0: option_parser.add_option('-e', '--emulator', dest='use_emulator', michael@0: action='store_true', michael@0: help='Run tests in a new instance of emulator') michael@0: option_parser.add_option('-n', '--emulator_count', michael@0: type='int', default=1, michael@0: help='Number of emulators to launch for running the ' michael@0: 'tests.') michael@0: option_parser.add_option('-x', '--xvfb', dest='use_xvfb', michael@0: action='store_true', michael@0: help='Use Xvfb around tests (ignored if not Linux)') michael@0: option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose', michael@0: action='store_true', michael@0: help='Go faster (but be less stable), ' michael@0: 'for quick testing. Example: when tracking down ' michael@0: 'tests that hang to add to the disabled list, ' michael@0: 'there is no need to redeploy the test binary ' michael@0: 'or data to the device again. 
                           'Don\'t use on bots by default!')
  option_parser.add_option('--repeat', dest='repeat', type='int',
                           default=2,
                           help='Repeat count on test timeout')
  option_parser.add_option('--exit_code', action='store_true',
                           help='If set, the exit code will be the total '
                           'number of failures.')
  option_parser.add_option('--exe', action='store_true',
                           help='If set, use the exe test runner instead of '
                           'the APK.')

  options, args = option_parser.parse_args(argv)
  if len(args) > 1:
    print 'Unknown argument:', args[1:]
    option_parser.print_usage()
    sys.exit(1)
  run_tests_helper.SetLogLevel(options.verbose_count)
  emulator.DeleteAllTempAVDs()
  failed_tests_count = Dispatch(options)

  # Failures of individual test suites are communicated by printing a
  # STEP_FAILURE message.
  # Returning a success exit status also prevents the buildbot from incorrectly
  # marking the last suite as failed if there were failures in other suites in
  # the batch (this happens because the exit status is a sum of all failures
  # from all suites, but the buildbot associates the exit status only with the
  # most recent step).
  if options.exit_code:
    return failed_tests_count
  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv))