media/webrtc/trunk/build/android/run_tests.py

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/media/webrtc/trunk/build/android/run_tests.py	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,450 @@
     1.4 +#!/usr/bin/env python
     1.5 +#
     1.6 +# Copyright (c) 2012 The Chromium Authors. All rights reserved.
     1.7 +# Use of this source code is governed by a BSD-style license that can be
     1.8 +# found in the LICENSE file.
     1.9 +
    1.10 +"""Runs all the native unit tests.
    1.11 +
     1.12 +1. Copy the test binary to /data/local on the device.
    1.13 +2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
    1.14 +   to be deployed to the device. We use the device's $EXTERNAL_STORAGE as the
    1.15 +   base dir (which maps to Context.getExternalFilesDir()).
    1.16 +3. Environment:
    1.17 +3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named:
    1.18 +     $EXTERNAL_STORAGE + /chrome/test/data
     1.19 +3.2. page_cycler_tests have the following requirements:
     1.20 +3.2.1. the following data on the host:
     1.21 +       <chrome_src_dir>/tools/page_cycler
     1.22 +       <chrome_src_dir>/data/page_cycler
     1.23 +3.2.2. two data directories on the device to store the above test data:
     1.24 +       $EXTERNAL_STORAGE + /tools/ (for database perf test)
     1.25 +       $EXTERNAL_STORAGE + /data/ (for other perf tests)
     1.26 +3.2.3. an HTTP server to serve the HTTP perf tests.
     1.27 +       The HTTP root is the host's <chrome_src_dir>/data/page_cycler/, port 8000.
     1.28 +3.2.4. a tool named forwarder, which must run on the device to forward
     1.29 +       HTTP requests/responses between the host and the device.
     1.30 +3.2.5. Chrome installed on the device.
     1.31 +4. Run the binary on the device and stream the log to the host.
    1.32 +4.1. Optionally, filter specific tests.
    1.33 +4.2. Optionally, rebaseline: run the available tests and update the
    1.34 +     suppressions file for failures.
    1.35 +4.3. If we're running a single test suite and we have multiple devices
    1.36 +     connected, we'll shard the tests.
    1.37 +5. Clean up the device.
    1.38 +
    1.39 +Suppressions:
    1.40 +
     1.41 +Individual tests in a test binary can be suppressed by listing them in
     1.42 +a file in the gtest_filter directory named after the test binary,
    1.43 +one test per line. Here is an example:
    1.44 +
    1.45 +  $ cat gtest_filter/base_unittests_disabled
    1.46 +  DataPackTest.Load
    1.47 +  ReadOnlyFileUtilTest.ContentsEqual
    1.48 +
     1.49 +This file is generated by the tests running on devices. If running on an
     1.50 +emulator, an additional filter file listing the tests that fail only on the
     1.51 +emulator is also loaded. We don't care about the rare test cases that succeed
     1.52 +on the emulator but fail on a device.
    1.53 +"""
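          +
          +# Example invocations (a sketch; see main() below for all options):
          +#   run_tests.py                      # all suites on attached devices
          +#   run_tests.py -s base_unittests -f 'DataPackTest.*'
          +#   run_tests.py -e -n 2              # launch two emulators and shard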
    1.54 +
    1.55 +import fnmatch
    1.56 +import logging
    1.57 +import optparse
    1.58 +import os
    1.59 +import signal
    1.60 +import subprocess
    1.61 +import sys
    1.62 +import time
    1.63 +
    1.64 +from pylib import android_commands
    1.65 +from pylib.base_test_sharder import BaseTestSharder
    1.66 +from pylib import buildbot_report
    1.67 +from pylib import constants
    1.68 +from pylib import debug_info
    1.69 +import emulator
    1.70 +from pylib import ports
    1.71 +from pylib import run_tests_helper
    1.72 +from pylib import test_options_parser
    1.73 +from pylib.single_test_runner import SingleTestRunner
    1.74 +from pylib.test_result import BaseTestResult, TestResults
    1.75 +
    1.76 +
    1.77 +_TEST_SUITES = ['base_unittests',
    1.78 +                'content_unittests',
    1.79 +                'gpu_unittests',
    1.80 +                'ipc_tests',
    1.81 +                'media_unittests',
    1.82 +                'net_unittests',
    1.83 +                'sql_unittests',
    1.84 +                'sync_unit_tests',
    1.85 +                'ui_unittests',
    1.86 +                'unit_tests',
    1.87 +               ]
    1.88 +
    1.89 +
    1.90 +def TestSuiteDir(build_type):
    1.91 +  """Return the base directory of test suites."""
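          +  # e.g. <constants.CHROME_DIR>/out/Release when build_type is 'Release'.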
    1.92 +  return os.path.abspath(os.path.join(constants.CHROME_DIR, 'out', build_type))
    1.93 +
    1.94 +def FullyQualifiedTestSuites(exe, option_test_suite, build_type):
     1.95 +  """Return a fully qualified list of test suite paths.
    1.96 +
    1.97 +  Args:
    1.98 +    exe: if True, use the executable-based test runner.
    1.99 +    option_test_suite: the test_suite specified as an option.
   1.100 +    build_type: 'Release' or 'Debug'.
   1.101 +  """
   1.102 +  test_suite_dir = TestSuiteDir(build_type)
   1.103 +  if option_test_suite:
   1.104 +    all_test_suites = [option_test_suite]
   1.105 +  else:
   1.106 +    all_test_suites = _TEST_SUITES
   1.107 +
   1.108 +  if exe:
   1.109 +    qualified_test_suites = [os.path.join(test_suite_dir, t)
   1.110 +                             for t in all_test_suites]
   1.111 +  else:
   1.112 +    # out/(Debug|Release)/$SUITE_apk/$SUITE-debug.apk
   1.113 +    qualified_test_suites = [os.path.join(test_suite_dir,
   1.114 +                                          t + '_apk',
   1.115 +                                          t + '-debug.apk')
   1.116 +                             for t in all_test_suites]
   1.117 +  for t, q in zip(all_test_suites, qualified_test_suites):
   1.118 +    if not os.path.exists(q):
   1.119 +      logging.critical('Test suite %s not found in %s.\n'
   1.120 +                       'Supported test suites:\n %s\n'
   1.121 +                       'Ensure it has been built.\n',
   1.122 +                       t, q, _TEST_SUITES)
   1.123 +      return []
   1.124 +  return qualified_test_suites
   1.125 +
   1.126 +
   1.127 +class TimeProfile(object):
    1.128 +  """Class for simple profiling of an action, with logging of the cost."""
   1.129 +
   1.130 +  def __init__(self, description):
   1.131 +    self._description = description
   1.132 +    self.Start()
   1.133 +
   1.134 +  def Start(self):
   1.135 +    self._starttime = time.time()
   1.136 +
   1.137 +  def Stop(self):
   1.138 +    """Stop profiling and dump a log."""
   1.139 +    if self._starttime:
   1.140 +      stoptime = time.time()
   1.141 +      logging.info('%fsec to perform %s',
   1.142 +                   stoptime - self._starttime, self._description)
   1.143 +      self._starttime = None
   1.144 +
   1.145 +
   1.146 +class Xvfb(object):
    1.147 +  """Class to start and stop Xvfb if relevant.  No-op if not Linux."""
   1.148 +
   1.149 +  def __init__(self):
   1.150 +    self._pid = 0
   1.151 +
   1.152 +  def _IsLinux(self):
   1.153 +    """Return True if on Linux; else False."""
   1.154 +    return sys.platform.startswith('linux')
   1.155 +
   1.156 +  def Start(self):
   1.157 +    """Start Xvfb and set an appropriate DISPLAY environment.  Linux only.
   1.158 +
   1.159 +    Copied from tools/code_coverage/coverage_posix.py
   1.160 +    """
   1.161 +    if not self._IsLinux():
   1.162 +      return
   1.163 +    proc = subprocess.Popen(['Xvfb', ':9', '-screen', '0', '1024x768x24',
   1.164 +                             '-ac'],
   1.165 +                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
   1.166 +    self._pid = proc.pid
   1.167 +    if not self._pid:
   1.168 +      raise Exception('Could not start Xvfb')
   1.169 +    os.environ['DISPLAY'] = ':9'
   1.170 +
   1.171 +    # Now confirm, giving a chance for it to start if needed.
   1.172 +    for _ in range(10):
   1.173 +      proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
   1.174 +      _, retcode = os.waitpid(proc.pid, 0)
   1.175 +      if retcode == 0:
   1.176 +        break
   1.177 +      time.sleep(0.25)
   1.178 +    if retcode != 0:
   1.179 +      raise Exception('Could not confirm Xvfb happiness')
   1.180 +
   1.181 +  def Stop(self):
   1.182 +    """Stop Xvfb if needed.  Linux only."""
   1.183 +    if self._pid:
   1.184 +      try:
   1.185 +        os.kill(self._pid, signal.SIGKILL)
    1.186 +      except OSError:
    1.187 +        pass  # The process may already be gone.
   1.188 +      del os.environ['DISPLAY']
   1.189 +      self._pid = 0
   1.190 +
   1.191 +
   1.192 +class TestSharder(BaseTestSharder):
   1.193 +  """Responsible for sharding the tests on the connected devices."""
   1.194 +
   1.195 +  def __init__(self, attached_devices, test_suite, gtest_filter,
   1.196 +               test_arguments, timeout, rebaseline, performance_test,
   1.197 +               cleanup_test_files, tool, log_dump_name, fast_and_loose,
   1.198 +               build_type):
   1.199 +    BaseTestSharder.__init__(self, attached_devices)
   1.200 +    self.test_suite = test_suite
   1.201 +    self.test_suite_basename = os.path.basename(test_suite)
   1.202 +    self.gtest_filter = gtest_filter or ''
   1.203 +    self.test_arguments = test_arguments
   1.204 +    self.timeout = timeout
   1.205 +    self.rebaseline = rebaseline
   1.206 +    self.performance_test = performance_test
   1.207 +    self.cleanup_test_files = cleanup_test_files
   1.208 +    self.tool = tool
   1.209 +    self.log_dump_name = log_dump_name
   1.210 +    self.fast_and_loose = fast_and_loose
   1.211 +    self.build_type = build_type
   1.212 +    test = SingleTestRunner(self.attached_devices[0], test_suite, gtest_filter,
   1.213 +                            test_arguments, timeout, rebaseline,
   1.214 +                            performance_test, cleanup_test_files, tool, 0,
    1.215 +                            bool(self.log_dump_name), fast_and_loose,
   1.216 +                            build_type)
   1.217 +    self.tests = []
   1.218 +    if not self.gtest_filter:
    1.219 +      # No filter has been specified, so add all the tests.
   1.220 +      # The executable/apk needs to be copied before we can call GetAllTests.
   1.221 +      test.test_package.StripAndCopyExecutable()
   1.222 +      all_tests = test.test_package.GetAllTests()
   1.223 +      if not rebaseline:
   1.224 +        disabled_list = test.GetDisabledTests()
    1.225 +        # Only include tests that do not match any pattern in the
    1.226 +        # disabled list.
    1.227 +        all_tests = [t for t in all_tests
    1.228 +                     if not any(fnmatch.fnmatch(t, disabled_pattern)
    1.229 +                                for disabled_pattern in disabled_list)]
   1.230 +      self.tests = all_tests
   1.231 +
   1.232 +  def CreateShardedTestRunner(self, device, index):
   1.233 +    """Creates a suite-specific test runner.
   1.234 +
   1.235 +    Args:
   1.236 +      device: Device serial where this shard will run.
   1.237 +      index: Index of this device in the pool.
   1.238 +
   1.239 +    Returns:
   1.240 +      A SingleTestRunner object.
   1.241 +    """
   1.242 +    device_num = len(self.attached_devices)
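          +    # Ceiling division so every test lands in a shard: e.g. 10 tests
          +    # across 3 devices gives a shard size of 4.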
   1.243 +    shard_size = (len(self.tests) + device_num - 1) / device_num
   1.244 +    shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size]
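          +    # gtest filters are colon-separated pattern lists, e.g. 'Foo.*:Bar.Baz'.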
   1.245 +    test_filter = ':'.join(shard_test_list) + self.gtest_filter
   1.246 +    return SingleTestRunner(device, self.test_suite,
   1.247 +                            test_filter, self.test_arguments, self.timeout,
   1.248 +                            self.rebaseline, self.performance_test,
   1.249 +                            self.cleanup_test_files, self.tool, index,
    1.250 +                            bool(self.log_dump_name), self.fast_and_loose,
   1.251 +                            self.build_type)
   1.252 +
   1.253 +  def OnTestsCompleted(self, test_runners, test_results):
    1.254 +    """Called when all tests have completed; logs and processes results."""
   1.255 +    test_results.LogFull('Unit test', os.path.basename(self.test_suite),
   1.256 +                         self.build_type)
   1.257 +    test_results.PrintAnnotation()
   1.258 +    if test_results.failed and self.rebaseline:
   1.259 +      test_runners[0].UpdateFilter(test_results.failed)
   1.260 +    if self.log_dump_name:
   1.261 +      # Zip all debug info outputs into a file named by log_dump_name.
   1.262 +      debug_info.GTestDebugInfo.ZipAndCleanResults(
   1.263 +          os.path.join(TestSuiteDir(self.build_type), 'debug_info_dumps'),
   1.264 +          self.log_dump_name)
   1.265 +
   1.266 +
   1.267 +def _RunATestSuite(options):
   1.268 +  """Run a single test suite.
   1.269 +
   1.270 +  Helper for Dispatch() to allow stop/restart of the emulator across
   1.271 +  test bundles.  If using the emulator, we start it on entry and stop
   1.272 +  it on exit.
   1.273 +
   1.274 +  Args:
   1.275 +    options: options for running the tests.
   1.276 +
   1.277 +  Returns:
   1.278 +    0 if successful, number of failing tests otherwise.
   1.279 +  """
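          +  # Derive the buildbot step name from the suite name, e.g.
          +  # 'base_unittests-debug.apk' -> 'base_unittests'.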
   1.280 +  step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
   1.281 +  buildbot_report.PrintNamedStep(step_name)
   1.282 +  attached_devices = []
   1.283 +  buildbot_emulators = []
   1.284 +
   1.285 +  if options.use_emulator:
   1.286 +    for n in range(options.emulator_count):
   1.287 +      t = TimeProfile('Emulator launch %d' % n)
    1.288 +      avd_name = None
   1.289 +      if n > 0:
   1.290 +        # Creates a temporary AVD for the extra emulators.
   1.291 +        avd_name = 'run_tests_avd_%d' % n
   1.292 +      buildbot_emulator = emulator.Emulator(avd_name, options.fast_and_loose)
   1.293 +      buildbot_emulator.Launch(kill_all_emulators=n == 0)
   1.294 +      t.Stop()
   1.295 +      buildbot_emulators.append(buildbot_emulator)
   1.296 +      attached_devices.append(buildbot_emulator.device)
    1.297 +    # Wait for all emulators to finish booting.
    1.298 +    for buildbot_emulator in buildbot_emulators:
    1.299 +      buildbot_emulator.ConfirmLaunch(True)
   1.300 +  elif options.test_device:
   1.301 +    attached_devices = [options.test_device]
   1.302 +  else:
   1.303 +    attached_devices = android_commands.GetAttachedDevices()
   1.304 +
   1.305 +  if not attached_devices:
   1.306 +    logging.critical('A device must be attached and online.')
   1.307 +    buildbot_report.PrintError()
   1.308 +    return 1
   1.309 +
   1.310 +  # Reset the test port allocation. It's important to do it before starting
   1.311 +  # to dispatch any tests.
   1.312 +  if not ports.ResetTestServerPortAllocation():
   1.313 +    raise Exception('Failed to reset test server port.')
   1.314 +
   1.315 +  if options.performance_test or options.gtest_filter:
    1.316 +    # These configurations can't be split across multiple devices.
   1.317 +    attached_devices = [attached_devices[0]]
   1.318 +  sharder = TestSharder(attached_devices, options.test_suite,
   1.319 +                        options.gtest_filter, options.test_arguments,
   1.320 +                        options.timeout, options.rebaseline,
   1.321 +                        options.performance_test,
   1.322 +                        options.cleanup_test_files, options.tool,
   1.323 +                        options.log_dump, options.fast_and_loose,
   1.324 +                        options.build_type)
   1.325 +  test_results = sharder.RunShardedTests()
   1.326 +
   1.327 +  for buildbot_emulator in buildbot_emulators:
   1.328 +    buildbot_emulator.Shutdown()
   1.329 +
    1.330 +  # Give the suite another chance if we timed out.  At this point it is
    1.331 +  # safe(r) to run fast and loose, since we just uploaded all the test
    1.332 +  # data and binaries.
   1.333 +  if test_results.timed_out and options.repeat:
   1.334 +    logging.critical('Timed out; repeating in fast_and_loose mode.')
   1.335 +    options.fast_and_loose = True
   1.336 +    options.repeat -= 1
    1.337 +    logging.critical('Repeats left: %d', options.repeat)
   1.338 +    return _RunATestSuite(options)
   1.339 +  return len(test_results.failed)
   1.340 +
   1.341 +
   1.342 +def Dispatch(options):
   1.343 +  """Dispatches the tests, sharding if possible.
   1.344 +
   1.345 +  If options.use_emulator is True, all tests will be run in new emulator
    1.346 +  instances.
   1.347 +
   1.348 +  Args:
   1.349 +    options: options for running the tests.
   1.350 +
   1.351 +  Returns:
   1.352 +    0 if successful, number of failing tests otherwise.
   1.353 +  """
   1.354 +  if options.test_suite == 'help':
   1.355 +    ListTestSuites()
   1.356 +    return 0
   1.357 +
   1.358 +  if options.use_xvfb:
   1.359 +    xvfb = Xvfb()
   1.360 +    xvfb.Start()
   1.361 +
   1.362 +  all_test_suites = FullyQualifiedTestSuites(options.exe, options.test_suite,
   1.363 +                                             options.build_type)
   1.364 +  failures = 0
   1.365 +  for suite in all_test_suites:
   1.366 +    options.test_suite = suite
   1.367 +    failures += _RunATestSuite(options)
   1.368 +
   1.369 +  if options.use_xvfb:
   1.370 +    xvfb.Stop()
   1.371 +  return failures
   1.372 +
   1.373 +
   1.374 +def ListTestSuites():
   1.375 +  """Display a list of available test suites."""
   1.376 +  print 'Available test suites are:'
   1.377 +  for test_suite in _TEST_SUITES:
   1.378 +    print test_suite
   1.379 +
   1.380 +
   1.381 +def main(argv):
   1.382 +  option_parser = optparse.OptionParser()
   1.383 +  test_options_parser.AddTestRunnerOptions(option_parser, default_timeout=0)
   1.384 +  option_parser.add_option('-s', '--suite', dest='test_suite',
   1.385 +                           help='Executable name of the test suite to run '
   1.386 +                           '(use -s help to list them)')
   1.387 +  option_parser.add_option('-d', '--device', dest='test_device',
    1.388 +                           help='Target device on which to run the test suite.')
   1.389 +  option_parser.add_option('-r', dest='rebaseline',
   1.390 +                           help='Rebaseline and update *testsuite_disabled',
   1.391 +                           action='store_true')
   1.392 +  option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
   1.393 +                           help='gtest filter')
   1.394 +  option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
   1.395 +                           help='Additional arguments to pass to the test')
   1.396 +  option_parser.add_option('-p', dest='performance_test',
    1.397 +                           help='Indicates a performance test.',
   1.398 +                           action='store_true')
   1.399 +  option_parser.add_option('-L', dest='log_dump',
    1.400 +                           help='file name of the log dump, which will be '
    1.401 +                           'put in the subfolder debug_info_dumps under the '
    1.402 +                           'directory where the test_suite exists.')
   1.403 +  option_parser.add_option('-e', '--emulator', dest='use_emulator',
   1.404 +                           action='store_true',
    1.405 +                           help='Run tests in a new emulator instance.')
   1.406 +  option_parser.add_option('-n', '--emulator_count',
   1.407 +                           type='int', default=1,
   1.408 +                           help='Number of emulators to launch for running the '
   1.409 +                           'tests.')
   1.410 +  option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
   1.411 +                           action='store_true',
   1.412 +                           help='Use Xvfb around tests (ignored if not Linux)')
   1.413 +  option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose',
   1.414 +                           action='store_true',
   1.415 +                           help='Go faster (but be less stable), '
   1.416 +                           'for quick testing.  Example: when tracking down '
   1.417 +                           'tests that hang to add to the disabled list, '
   1.418 +                           'there is no need to redeploy the test binary '
   1.419 +                           'or data to the device again.  '
   1.420 +                           'Don\'t use on bots by default!')
   1.421 +  option_parser.add_option('--repeat', dest='repeat', type='int',
   1.422 +                           default=2,
   1.423 +                           help='Repeat count on test timeout')
   1.424 +  option_parser.add_option('--exit_code', action='store_true',
   1.425 +                           help='If set, the exit code will be total number '
   1.426 +                           'of failures.')
   1.427 +  option_parser.add_option('--exe', action='store_true',
   1.428 +                           help='If set, use the exe test runner instead of '
   1.429 +                           'the APK.')
   1.430 +
   1.431 +  options, args = option_parser.parse_args(argv)
   1.432 +  if len(args) > 1:
   1.433 +    print 'Unknown argument:', args[1:]
   1.434 +    option_parser.print_usage()
   1.435 +    sys.exit(1)
   1.436 +  run_tests_helper.SetLogLevel(options.verbose_count)
   1.437 +  emulator.DeleteAllTempAVDs()
   1.438 +  failed_tests_count = Dispatch(options)
   1.439 +
   1.440 +  # Failures of individual test suites are communicated by printing a
   1.441 +  # STEP_FAILURE message.
   1.442 +  # Returning a success exit status also prevents the buildbot from incorrectly
   1.443 +  # marking the last suite as failed if there were failures in other suites in
   1.444 +  # the batch (this happens because the exit status is a sum of all failures
   1.445 +  # from all suites, but the buildbot associates the exit status only with the
   1.446 +  # most recent step).
   1.447 +  if options.exit_code:
   1.448 +    return failed_tests_count
   1.449 +  return 0
   1.450 +
   1.451 +
   1.452 +if __name__ == '__main__':
   1.453 +  sys.exit(main(sys.argv))
