media/webrtc/trunk/build/android/run_tests.py

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 13:27:57 +0100
branch       TOR_BUG_3246
changeset    6:8bccb770b82d
permissions  -rwxr-xr-x

Ignore runtime configuration files generated during quality assurance.

#!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

michael@0 7 """Runs all the native unit tests.
michael@0 8
michael@0 9 1. Copy over test binary to /data/local on device.
michael@0 10 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
michael@0 11 to be deployed to the device. We use the device's $EXTERNAL_STORAGE as the
michael@0 12 base dir (which maps to Context.getExternalFilesDir()).
michael@0 13 3. Environment:
michael@0 14 3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named:
michael@0 15 $EXTERNAL_STORAGE + /chrome/test/data
michael@0 16 3.2. page_cycler_tests have following requirements,
michael@0 17 3.2.1 the following data on host:
michael@0 18 <chrome_src_dir>/tools/page_cycler
michael@0 19 <chrome_src_dir>/data/page_cycler
michael@0 20 3.2.2. two data directories to store above test data on device named:
michael@0 21 $EXTERNAL_STORAGE + /tools/ (for database perf test)
michael@0 22 $EXTERNAL_STORAGE + /data/ (for other perf tests)
michael@0 23 3.2.3. a http server to serve http perf tests.
michael@0 24 The http root is host's <chrome_src_dir>/data/page_cycler/, port 8000.
michael@0 25 3.2.4 a tool named forwarder is also required to run on device to
michael@0 26 forward the http request/response between host and device.
michael@0 27 3.2.5 Chrome is installed on device.
michael@0 28 4. Run the binary in the device and stream the log to the host.
michael@0 29 4.1. Optionally, filter specific tests.
michael@0 30 4.2. Optionally, rebaseline: run the available tests and update the
michael@0 31 suppressions file for failures.
michael@0 32 4.3. If we're running a single test suite and we have multiple devices
michael@0 33 connected, we'll shard the tests.
michael@0 34 5. Clean up the device.
michael@0 35
michael@0 36 Suppressions:
michael@0 37
michael@0 38 Individual tests in a test binary can be suppressed by listing it in
michael@0 39 the gtest_filter directory in a file of the same name as the test binary,
michael@0 40 one test per line. Here is an example:
michael@0 41
michael@0 42 $ cat gtest_filter/base_unittests_disabled
michael@0 43 DataPackTest.Load
michael@0 44 ReadOnlyFileUtilTest.ContentsEqual
michael@0 45
michael@0 46 This file is generated by the tests running on devices. If running on emulator,
michael@0 47 additonal filter file which lists the tests only failed in emulator will be
michael@0 48 loaded. We don't care about the rare testcases which succeeded on emuatlor, but
michael@0 49 failed on device.
michael@0 50 """

import fnmatch
import logging
import optparse
import os
import signal
import subprocess
import sys
import time

from pylib import android_commands
from pylib.base_test_sharder import BaseTestSharder
from pylib import buildbot_report
from pylib import constants
from pylib import debug_info
import emulator
from pylib import ports
from pylib import run_tests_helper
from pylib import test_options_parser
from pylib.single_test_runner import SingleTestRunner
from pylib.test_result import BaseTestResult, TestResults


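# Test suites run by default when no --suite/-s option is given.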
_TEST_SUITES = ['base_unittests',
                'content_unittests',
                'gpu_unittests',
                'ipc_tests',
                'media_unittests',
                'net_unittests',
                'sql_unittests',
                'sync_unit_tests',
                'ui_unittests',
                'unit_tests',
                ]


def TestSuiteDir(build_type):
  """Return the base directory of test suites."""
  return os.path.abspath(os.path.join(constants.CHROME_DIR, 'out', build_type))


def FullyQualifiedTestSuites(exe, option_test_suite, build_type):
  """Return a fully qualified list of test suite paths.

  Args:
    exe: if True, use the executable-based test runner.
    option_test_suite: the test_suite specified as an option.
    build_type: 'Release' or 'Debug'.

  Returns:
    A list of absolute paths to the test suite binaries (or APKs), or an
    empty list if any requested suite has not been built.
  """
  test_suite_dir = TestSuiteDir(build_type)
  if option_test_suite:
    all_test_suites = [option_test_suite]
  else:
    all_test_suites = _TEST_SUITES

  if exe:
    qualified_test_suites = [os.path.join(test_suite_dir, t)
                             for t in all_test_suites]
  else:
    # out/(Debug|Release)/$SUITE_apk/$SUITE-debug.apk
    qualified_test_suites = [os.path.join(test_suite_dir,
                                          t + '_apk',
                                          t + '-debug.apk')
                             for t in all_test_suites]
  for t, q in zip(all_test_suites, qualified_test_suites):
    if not os.path.exists(q):
      logging.critical('Test suite %s not found in %s.\n'
                       'Supported test suites:\n  %s\n'
                       'Ensure it has been built.\n',
                       t, q, _TEST_SUITES)
      return []
  return qualified_test_suites


class TimeProfile(object):
  """Class for simple profiling of an action, with logging of cost."""

  def __init__(self, description):
    self._description = description
    self.Start()

  def Start(self):
    self._starttime = time.time()

  def Stop(self):
    """Stop profiling and dump a log."""
    if self._starttime:
      stoptime = time.time()
      logging.info('%fsec to perform %s',
                   stoptime - self._starttime, self._description)
      self._starttime = None


class Xvfb(object):
  """Class to start and stop Xvfb if relevant. No-op if not Linux."""

  def __init__(self):
    self._pid = 0

  def _IsLinux(self):
    """Return True if on Linux; else False."""
    return sys.platform.startswith('linux')

  def Start(self):
    """Start Xvfb and set an appropriate DISPLAY environment. Linux only.

    Copied from tools/code_coverage/coverage_posix.py
    """
    if not self._IsLinux():
      return
    proc = subprocess.Popen(['Xvfb', ':9', '-screen', '0', '1024x768x24',
                             '-ac'],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    self._pid = proc.pid
    if not self._pid:
      raise Exception('Could not start Xvfb')
    os.environ['DISPLAY'] = ':9'

    # Now confirm, giving a chance for it to start if needed.
    for _ in range(10):
      proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
      _, retcode = os.waitpid(proc.pid, 0)
      if retcode == 0:
        break
      time.sleep(0.25)
    if retcode != 0:
      raise Exception('Could not confirm Xvfb happiness')

  def Stop(self):
    """Stop Xvfb if needed. Linux only."""
    if self._pid:
      try:
        os.kill(self._pid, signal.SIGKILL)
      except OSError:
        # The Xvfb process may already have exited.
        pass
      del os.environ['DISPLAY']
      self._pid = 0


class TestSharder(BaseTestSharder):
  """Responsible for sharding the tests on the connected devices."""

  def __init__(self, attached_devices, test_suite, gtest_filter,
               test_arguments, timeout, rebaseline, performance_test,
               cleanup_test_files, tool, log_dump_name, fast_and_loose,
               build_type):
    BaseTestSharder.__init__(self, attached_devices)
    self.test_suite = test_suite
    self.test_suite_basename = os.path.basename(test_suite)
    self.gtest_filter = gtest_filter or ''
    self.test_arguments = test_arguments
    self.timeout = timeout
    self.rebaseline = rebaseline
    self.performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = tool
    self.log_dump_name = log_dump_name
    self.fast_and_loose = fast_and_loose
    self.build_type = build_type
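    # Probe runner on the first attached device, used below to enumerate the
    # tests in the suite ('not not' coerces log_dump_name into a boolean flag).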
    test = SingleTestRunner(self.attached_devices[0], test_suite, gtest_filter,
                            test_arguments, timeout, rebaseline,
                            performance_test, cleanup_test_files, tool, 0,
                            not not self.log_dump_name, fast_and_loose,
                            build_type)
    self.tests = []
    if not self.gtest_filter:
      # No filter has been specified, so add all tests.
      # The executable/apk needs to be copied before we can call GetAllTests.
      test.test_package.StripAndCopyExecutable()
      all_tests = test.test_package.GetAllTests()
      if not rebaseline:
        disabled_list = test.GetDisabledTests()
        # Only include tests that do not match any pattern in the disabled
        # list.
        all_tests = filter(lambda t:
                           not any([fnmatch.fnmatch(t, disabled_pattern)
                                    for disabled_pattern in disabled_list]),
                           all_tests)
      self.tests = all_tests

  def CreateShardedTestRunner(self, device, index):
    """Creates a suite-specific test runner.

    Args:
      device: Device serial where this shard will run.
      index: Index of this device in the pool.

    Returns:
      A SingleTestRunner object.
    """
    device_num = len(self.attached_devices)
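    # Ceiling division: every test lands in exactly one shard, and only the
    # last shard may be smaller.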
    shard_size = (len(self.tests) + device_num - 1) / device_num
    shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size]
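    # A gtest filter is a colon-separated list of test names. When a filter
    # was given on the command line, self.tests is empty (see __init__) and
    # the user's filter passes through unchanged.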
    test_filter = ':'.join(shard_test_list) + self.gtest_filter
    return SingleTestRunner(device, self.test_suite,
                            test_filter, self.test_arguments, self.timeout,
                            self.rebaseline, self.performance_test,
                            self.cleanup_test_files, self.tool, index,
                            not not self.log_dump_name, self.fast_and_loose,
                            self.build_type)

  def OnTestsCompleted(self, test_runners, test_results):
    """Notifies that we completed the tests."""
    test_results.LogFull('Unit test', os.path.basename(self.test_suite),
                         self.build_type)
    test_results.PrintAnnotation()
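    # When rebaselining, write the newly failing tests back into the suite's
    # suppression file.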
    if test_results.failed and self.rebaseline:
      test_runners[0].UpdateFilter(test_results.failed)
    if self.log_dump_name:
      # Zip all debug info outputs into a file named by log_dump_name.
      debug_info.GTestDebugInfo.ZipAndCleanResults(
          os.path.join(TestSuiteDir(self.build_type), 'debug_info_dumps'),
          self.log_dump_name)


def _RunATestSuite(options):
  """Run a single test suite.

  Helper for Dispatch() to allow stop/restart of the emulator across
  test bundles. If using the emulator, we start it on entry and stop
  it on exit.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
  buildbot_report.PrintNamedStep(step_name)
  attached_devices = []
  buildbot_emulators = []

  if options.use_emulator:
    for n in range(options.emulator_count):
      t = TimeProfile('Emulator launch %d' % n)
      avd_name = None
      if n > 0:
        # Creates a temporary AVD for the extra emulators.
        avd_name = 'run_tests_avd_%d' % n
      buildbot_emulator = emulator.Emulator(avd_name, options.fast_and_loose)
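      # Only the first launch kills any emulators that are already running;
      # subsequent launches add instances alongside it.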
      buildbot_emulator.Launch(kill_all_emulators=n == 0)
      t.Stop()
      buildbot_emulators.append(buildbot_emulator)
      attached_devices.append(buildbot_emulator.device)
    # Wait for all emulators to finish booting.
    map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True),
        buildbot_emulators)
  elif options.test_device:
    attached_devices = [options.test_device]
  else:
    attached_devices = android_commands.GetAttachedDevices()

  if not attached_devices:
    logging.critical('A device must be attached and online.')
    buildbot_report.PrintError()
    return 1

  # Reset the test port allocation. It's important to do it before starting
  # to dispatch any tests.
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if options.performance_test or options.gtest_filter:
    # These configurations can't be split across multiple devices.
    attached_devices = [attached_devices[0]]
  sharder = TestSharder(attached_devices, options.test_suite,
                        options.gtest_filter, options.test_arguments,
                        options.timeout, options.rebaseline,
                        options.performance_test,
                        options.cleanup_test_files, options.tool,
                        options.log_dump, options.fast_and_loose,
                        options.build_type)
  test_results = sharder.RunShardedTests()

  for buildbot_emulator in buildbot_emulators:
    buildbot_emulator.Shutdown()

  # Another chance if we timed out? At this point it is safe(r) to
  # run fast and loose since we just uploaded all the test data and
  # binary.
  if test_results.timed_out and options.repeat:
    logging.critical('Timed out; repeating in fast_and_loose mode.')
    options.fast_and_loose = True
    options.repeat -= 1
    logging.critical('Repeats left: ' + str(options.repeat))
    return _RunATestSuite(options)
  return len(test_results.failed)


def Dispatch(options):
  """Dispatches the tests, sharding if possible.

  If options.use_emulator is True, all tests will be run in a new emulator
  instance.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  if options.test_suite == 'help':
    ListTestSuites()
    return 0

  if options.use_xvfb:
    xvfb = Xvfb()
    xvfb.Start()

  all_test_suites = FullyQualifiedTestSuites(options.exe, options.test_suite,
                                             options.build_type)
  failures = 0
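  # Run the suites sequentially, accumulating failure counts across them.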
  for suite in all_test_suites:
    options.test_suite = suite
    failures += _RunATestSuite(options)

  if options.use_xvfb:
    xvfb.Stop()
  return failures


def ListTestSuites():
  """Display a list of available test suites."""
  print 'Available test suites are:'
  for test_suite in _TEST_SUITES:
    print test_suite


def main(argv):
  option_parser = optparse.OptionParser()
  test_options_parser.AddTestRunnerOptions(option_parser, default_timeout=0)
  option_parser.add_option('-s', '--suite', dest='test_suite',
                           help='Executable name of the test suite to run '
                           '(use -s help to list them)')
  option_parser.add_option('-d', '--device', dest='test_device',
                           help='Target device on which to run the test suite')
  option_parser.add_option('-r', dest='rebaseline',
                           help='Rebaseline and update *testsuite_disabled',
                           action='store_true')
  option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
                           help='gtest filter')
  option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
                           help='Additional arguments to pass to the test')
  option_parser.add_option('-p', dest='performance_test',
                           help='Indicator of performance test',
                           action='store_true')
  option_parser.add_option('-L', dest='log_dump',
                           help='File name of the log dump, which will be put '
                           'in the subfolder debug_info_dumps under the '
                           'directory where the test_suite exists.')
  option_parser.add_option('-e', '--emulator', dest='use_emulator',
                           action='store_true',
                           help='Run tests in a new instance of the emulator')
  option_parser.add_option('-n', '--emulator_count',
                           type='int', default=1,
                           help='Number of emulators to launch for running '
                           'the tests.')
  option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
                           action='store_true',
                           help='Use Xvfb around tests (ignored if not Linux)')
  option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose',
                           action='store_true',
                           help='Go faster (but be less stable), '
                           'for quick testing. Example: when tracking down '
                           'tests that hang to add to the disabled list, '
                           'there is no need to redeploy the test binary '
                           'or data to the device again. '
                           'Don\'t use on bots by default!')
  option_parser.add_option('--repeat', dest='repeat', type='int',
                           default=2,
                           help='Repeat count on test timeout')
  option_parser.add_option('--exit_code', action='store_true',
                           help='If set, the exit code will be the total '
                           'number of failures.')
  option_parser.add_option('--exe', action='store_true',
                           help='If set, use the exe test runner instead of '
                           'the APK.')

  options, args = option_parser.parse_args(argv)
  if len(args) > 1:
    print 'Unknown argument:', args[1:]
    option_parser.print_usage()
    sys.exit(1)
  run_tests_helper.SetLogLevel(options.verbose_count)
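  # Remove any temporary AVDs left behind by previous emulator runs.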
  emulator.DeleteAllTempAVDs()
  failed_tests_count = Dispatch(options)

  # Failures of individual test suites are communicated by printing a
  # STEP_FAILURE message.
  # Returning a success exit status also prevents the buildbot from incorrectly
  # marking the last suite as failed if there were failures in other suites in
  # the batch (this happens because the exit status is a sum of all failures
  # from all suites, but the buildbot associates the exit status only with the
  # most recent step).
  if options.exit_code:
    return failed_tests_count
  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv))
