media/webrtc/trunk/build/android/run_tests.py

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 13:27:57 +0100
branch       TOR_BUG_3246
changeset    6:8bccb770b82d
permissions  -rwxr-xr-x

Ignore runtime configuration files generated during quality assurance.

#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

     7 """Runs all the native unit tests.
     9 1. Copy over test binary to /data/local on device.
    10 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
    11    to be deployed to the device. We use the device's $EXTERNAL_STORAGE as the
    12    base dir (which maps to Context.getExternalFilesDir()).
    13 3. Environment:
    14 3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named:
    15      $EXTERNAL_STORAGE + /chrome/test/data
    16 3.2. page_cycler_tests have following requirements,
    17 3.2.1  the following data on host:
    18        <chrome_src_dir>/tools/page_cycler
    19        <chrome_src_dir>/data/page_cycler
    20 3.2.2. two data directories to store above test data on device named:
    21        $EXTERNAL_STORAGE + /tools/ (for database perf test)
    22        $EXTERNAL_STORAGE + /data/ (for other perf tests)
    23 3.2.3. a http server to serve http perf tests.
    24        The http root is host's <chrome_src_dir>/data/page_cycler/, port 8000.
    25 3.2.4  a tool named forwarder is also required to run on device to
    26        forward the http request/response between host and device.
    27 3.2.5  Chrome is installed on device.
    28 4. Run the binary in the device and stream the log to the host.
    29 4.1. Optionally, filter specific tests.
    30 4.2. Optionally, rebaseline: run the available tests and update the
    31      suppressions file for failures.
    32 4.3. If we're running a single test suite and we have multiple devices
    33      connected, we'll shard the tests.
    34 5. Clean up the device.
    36 Suppressions:
    38 Individual tests in a test binary can be suppressed by listing it in
    39 the gtest_filter directory in a file of the same name as the test binary,
    40 one test per line. Here is an example:
    42   $ cat gtest_filter/base_unittests_disabled
    43   DataPackTest.Load
    44   ReadOnlyFileUtilTest.ContentsEqual
    46 This file is generated by the tests running on devices. If running on emulator,
    47 additonal filter file which lists the tests only failed in emulator will be
    48 loaded. We don't care about the rare testcases which succeeded on emuatlor, but
    49 failed on device.
    50 """

import fnmatch
import logging
import optparse
import os
import signal
import subprocess
import sys
import time

from pylib import android_commands
from pylib.base_test_sharder import BaseTestSharder
from pylib import buildbot_report
from pylib import constants
from pylib import debug_info
import emulator
from pylib import ports
from pylib import run_tests_helper
from pylib import test_options_parser
from pylib.single_test_runner import SingleTestRunner
from pylib.test_result import BaseTestResult, TestResults
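

# Default suites to run when no --suite option is specified (pass "-s help"
# to list them at runtime via ListTestSuites below).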
_TEST_SUITES = ['base_unittests',
                'content_unittests',
                'gpu_unittests',
                'ipc_tests',
                'media_unittests',
                'net_unittests',
                'sql_unittests',
                'sync_unit_tests',
                'ui_unittests',
                'unit_tests',
               ]


def TestSuiteDir(build_type):
  """Return the base directory of test suites."""
  return os.path.abspath(os.path.join(constants.CHROME_DIR, 'out', build_type))

def FullyQualifiedTestSuites(exe, option_test_suite, build_type):
  """Return a fully qualified list of paths to the test suites to run.

  Args:
    exe: if True, use the executable-based test runner.
    option_test_suite: the test_suite specified as an option.
    build_type: 'Release' or 'Debug'.
  """
  test_suite_dir = TestSuiteDir(build_type)
  if option_test_suite:
    all_test_suites = [option_test_suite]
  else:
    all_test_suites = _TEST_SUITES

  if exe:
    qualified_test_suites = [os.path.join(test_suite_dir, t)
                             for t in all_test_suites]
  else:
    # out/(Debug|Release)/$SUITE_apk/$SUITE-debug.apk
    qualified_test_suites = [os.path.join(test_suite_dir,
                                          t + '_apk',
                                          t + '-debug.apk')
                             for t in all_test_suites]
  for t, q in zip(all_test_suites, qualified_test_suites):
    if not os.path.exists(q):
      logging.critical('Test suite %s not found in %s.\n'
                       'Supported test suites:\n %s\n'
                       'Ensure it has been built.\n',
                       t, q, _TEST_SUITES)
      return []
  return qualified_test_suites


class TimeProfile(object):
  """Class for simple profiling of an action, with logging of the cost."""

  def __init__(self, description):
    self._description = description
    self.Start()

  def Start(self):
    self._starttime = time.time()

  def Stop(self):
    """Stop profiling and dump a log."""
    if self._starttime:
      stoptime = time.time()
      logging.info('%fsec to perform %s',
                   stoptime - self._starttime, self._description)
      self._starttime = None


class Xvfb(object):
  """Class to start and stop Xvfb if relevant.  No-op if not Linux."""

  def __init__(self):
    self._pid = 0

  def _IsLinux(self):
    """Return True if on Linux; else False."""
    return sys.platform.startswith('linux')

  def Start(self):
    """Start Xvfb and set an appropriate DISPLAY environment.  Linux only.

    Copied from tools/code_coverage/coverage_posix.py
    """
    if not self._IsLinux():
      return
    proc = subprocess.Popen(['Xvfb', ':9', '-screen', '0', '1024x768x24',
                             '-ac'],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    self._pid = proc.pid
    if not self._pid:
      raise Exception('Could not start Xvfb')
    os.environ['DISPLAY'] = ':9'

    # Now confirm, giving a chance for it to start if needed.
    for _ in range(10):
      proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
      _, retcode = os.waitpid(proc.pid, 0)
      if retcode == 0:
        break
      time.sleep(0.25)
    if retcode != 0:
      raise Exception('Could not confirm Xvfb happiness')

  def Stop(self):
    """Stop Xvfb if needed.  Linux only."""
    if self._pid:
      try:
        os.kill(self._pid, signal.SIGKILL)
      except OSError:
        # The Xvfb process may already have exited.
        pass
      del os.environ['DISPLAY']
      self._pid = 0


class TestSharder(BaseTestSharder):
  """Responsible for sharding the tests on the connected devices."""

  def __init__(self, attached_devices, test_suite, gtest_filter,
               test_arguments, timeout, rebaseline, performance_test,
               cleanup_test_files, tool, log_dump_name, fast_and_loose,
               build_type):
    BaseTestSharder.__init__(self, attached_devices)
    self.test_suite = test_suite
    self.test_suite_basename = os.path.basename(test_suite)
    self.gtest_filter = gtest_filter or ''
    self.test_arguments = test_arguments
    self.timeout = timeout
    self.rebaseline = rebaseline
    self.performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = tool
    self.log_dump_name = log_dump_name
    self.fast_and_loose = fast_and_loose
    self.build_type = build_type
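    # Throwaway runner on the first device, used only to enumerate the tests
    # in the suite and to read the list of disabled tests.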
    test = SingleTestRunner(self.attached_devices[0], test_suite, gtest_filter,
                            test_arguments, timeout, rebaseline,
                            performance_test, cleanup_test_files, tool, 0,
                            bool(self.log_dump_name), fast_and_loose,
                            build_type)
    self.tests = []
    if not self.gtest_filter:
      # No filter has been specified, so add all tests.
      # The executable/apk needs to be copied before we can call GetAllTests.
      test.test_package.StripAndCopyExecutable()
      all_tests = test.test_package.GetAllTests()
      if not rebaseline:
        disabled_list = test.GetDisabledTests()
        # Only include tests that do not match any pattern in the disabled
        # list.
        all_tests = filter(lambda t:
                           not any([fnmatch.fnmatch(t, disabled_pattern)
                                    for disabled_pattern in disabled_list]),
                           all_tests)
      self.tests = all_tests

  def CreateShardedTestRunner(self, device, index):
    """Creates a suite-specific test runner.

    Args:
      device: Device serial where this shard will run.
      index: Index of this device in the pool.

    Returns:
      A SingleTestRunner object.
    """
    device_num = len(self.attached_devices)
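    # Ceiling division: spread the tests as evenly as possible across devices,
    # with the last shard possibly smaller than the rest.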
    shard_size = (len(self.tests) + device_num - 1) / device_num
    shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size]
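    # When a gtest_filter was supplied, self.tests is empty (see __init__),
    # so the join contributes nothing and the user's filter is used as-is.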
    test_filter = ':'.join(shard_test_list) + self.gtest_filter
    return SingleTestRunner(device, self.test_suite,
                            test_filter, self.test_arguments, self.timeout,
                            self.rebaseline, self.performance_test,
                            self.cleanup_test_files, self.tool, index,
                            bool(self.log_dump_name), self.fast_and_loose,
                            self.build_type)

  def OnTestsCompleted(self, test_runners, test_results):
    """Notifies that we completed the tests."""
    test_results.LogFull('Unit test', os.path.basename(self.test_suite),
                         self.build_type)
    test_results.PrintAnnotation()
    if test_results.failed and self.rebaseline:
      test_runners[0].UpdateFilter(test_results.failed)
    if self.log_dump_name:
      # Zip all debug info outputs into a file named by log_dump_name.
      debug_info.GTestDebugInfo.ZipAndCleanResults(
          os.path.join(TestSuiteDir(self.build_type), 'debug_info_dumps'),
          self.log_dump_name)


def _RunATestSuite(options):
  """Run a single test suite.

  Helper for Dispatch() to allow stop/restart of the emulator across
  test bundles.  If using the emulator, we start it on entry and stop
  it on exit.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
  buildbot_report.PrintNamedStep(step_name)
  attached_devices = []
  buildbot_emulators = []

  if options.use_emulator:
    for n in range(options.emulator_count):
      t = TimeProfile('Emulator launch %d' % n)
      avd_name = None
      if n > 0:
        # Create a temporary AVD for the extra emulators.
        avd_name = 'run_tests_avd_%d' % n
      buildbot_emulator = emulator.Emulator(avd_name, options.fast_and_loose)
      buildbot_emulator.Launch(kill_all_emulators=(n == 0))
      t.Stop()
      buildbot_emulators.append(buildbot_emulator)
      attached_devices.append(buildbot_emulator.device)
    # Wait for all emulators to finish booting.
    map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True),
        buildbot_emulators)
  elif options.test_device:
    attached_devices = [options.test_device]
  else:
    attached_devices = android_commands.GetAttachedDevices()

  if not attached_devices:
    logging.critical('A device must be attached and online.')
    buildbot_report.PrintError()
    return 1

  # Reset the test port allocation. It's important to do it before starting
  # to dispatch any tests.
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if options.performance_test or options.gtest_filter:
    # These configurations can't be sharded across multiple devices.
    attached_devices = [attached_devices[0]]
  sharder = TestSharder(attached_devices, options.test_suite,
                        options.gtest_filter, options.test_arguments,
                        options.timeout, options.rebaseline,
                        options.performance_test,
                        options.cleanup_test_files, options.tool,
                        options.log_dump, options.fast_and_loose,
                        options.build_type)
  test_results = sharder.RunShardedTests()

  for buildbot_emulator in buildbot_emulators:
    buildbot_emulator.Shutdown()

  # Another chance if we timed out?  At this point it is safe(r) to
  # run fast and loose since we just uploaded all the test data and
  # binary.
  if test_results.timed_out and options.repeat:
    logging.critical('Timed out; repeating in fast_and_loose mode.')
    options.fast_and_loose = True
    options.repeat -= 1
    logging.critical('Repeats left: ' + str(options.repeat))
    return _RunATestSuite(options)
  return len(test_results.failed)


def Dispatch(options):
  """Dispatches the tests, sharding if possible.

  If options.use_emulator is True, all tests will be run in a new emulator
  instance.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  if options.test_suite == 'help':
    ListTestSuites()
    return 0

  if options.use_xvfb:
    xvfb = Xvfb()
    xvfb.Start()

  all_test_suites = FullyQualifiedTestSuites(options.exe, options.test_suite,
                                             options.build_type)
  failures = 0
  for suite in all_test_suites:
    options.test_suite = suite
    failures += _RunATestSuite(options)

  if options.use_xvfb:
    xvfb.Stop()
  return failures


def ListTestSuites():
  """Display a list of available test suites."""
  print 'Available test suites are:'
  for test_suite in _TEST_SUITES:
    print test_suite


def main(argv):
  option_parser = optparse.OptionParser()
  test_options_parser.AddTestRunnerOptions(option_parser, default_timeout=0)
  option_parser.add_option('-s', '--suite', dest='test_suite',
                           help='Executable name of the test suite to run '
                           '(use -s help to list them)')
  option_parser.add_option('-d', '--device', dest='test_device',
                           help='Target device on which to run the test suite')
  option_parser.add_option('-r', dest='rebaseline',
                           help='Rebaseline and update *testsuite_disabled',
                           action='store_true')
  option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
                           help='gtest filter')
  option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
                           help='Additional arguments to pass to the test')
  option_parser.add_option('-p', dest='performance_test',
                           help='Indicator of performance test',
                           action='store_true')
  option_parser.add_option('-L', dest='log_dump',
                           help='File name of the log dump, which will be put '
                           'in the subfolder debug_info_dumps under the '
                           'directory where the test_suite exists.')
  option_parser.add_option('-e', '--emulator', dest='use_emulator',
                           action='store_true',
                           help='Run tests in a new instance of the emulator')
  option_parser.add_option('-n', '--emulator_count',
                           type='int', default=1,
                           help='Number of emulators to launch for running the '
                           'tests.')
  option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
                           action='store_true',
                           help='Use Xvfb around tests (ignored if not Linux)')
  option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose',
                           action='store_true',
                           help='Go faster (but be less stable), '
                           'for quick testing.  Example: when tracking down '
                           'tests that hang to add to the disabled list, '
                           'there is no need to redeploy the test binary '
                           'or data to the device again.  '
                           'Don\'t use on bots by default!')
  option_parser.add_option('--repeat', dest='repeat', type='int',
                           default=2,
                           help='Repeat count on test timeout')
  option_parser.add_option('--exit_code', action='store_true',
                           help='If set, the exit code will be the total '
                           'number of failures.')
  option_parser.add_option('--exe', action='store_true',
                           help='If set, use the exe test runner instead of '
                           'the APK.')

  options, args = option_parser.parse_args(argv)
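  # argv[0] is the script name, so any additional positional argument is
  # unexpected.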
  if len(args) > 1:
    print 'Unknown argument:', args[1:]
    option_parser.print_usage()
    sys.exit(1)
  run_tests_helper.SetLogLevel(options.verbose_count)
  emulator.DeleteAllTempAVDs()
  failed_tests_count = Dispatch(options)

  # Failures of individual test suites are communicated by printing a
  # STEP_FAILURE message.
  # Returning a success exit status also prevents the buildbot from incorrectly
  # marking the last suite as failed if there were failures in other suites in
  # the batch (this happens because the exit status is a sum of all failures
  # from all suites, but the buildbot associates the exit status only with the
  # most recent step).
  if options.exit_code:
    return failed_tests_count
  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv))
