media/webrtc/trunk/build/android/pylib/run_java_tests.py

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/media/webrtc/trunk/build/android/pylib/run_java_tests.py	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,591 @@
     1.4 +# Copyright (c) 2012 The Chromium Authors. All rights reserved.
     1.5 +# Use of this source code is governed by a BSD-style license that can be
     1.6 +# found in the LICENSE file.
     1.7 +
     1.8 +"""Runs the Java tests. See more information on run_instrumentation_tests.py."""
     1.9 +
    1.10 +import fnmatch
    1.11 +import logging
    1.12 +import os
    1.13 +import re
    1.14 +import shutil
    1.15 +import sys
    1.16 +import time
    1.17 +
    1.18 +import android_commands
    1.19 +import apk_info
    1.20 +from base_test_runner import BaseTestRunner
    1.21 +from base_test_sharder import BaseTestSharder, SetTestsContainer
    1.22 +import cmd_helper
    1.23 +import constants
    1.24 +import errors
    1.25 +from forwarder import Forwarder
    1.26 +from json_perf_parser import GetAverageRunInfoFromJSONString
    1.27 +from perf_tests_helper import PrintPerfResult
    1.28 +import sharded_tests_queue
    1.29 +from test_result import SingleTestResult, TestResults
    1.30 +import valgrind_tools
    1.31 +
    1.32 +_PERF_TEST_ANNOTATION = 'PerfTest'
    1.33 +
    1.34 +
    1.35 +class FatalTestException(Exception):
    1.36 +  """A fatal test exception."""
    1.37 +  pass
    1.38 +
    1.39 +
    1.40 +def _TestNameToExpectation(test_name):
    1.41 +  # A test name is a Package.Path.Class#testName; convert to what we use in
    1.42 +  # the expectation file.
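          +  # e.g. 'a.b.Class#testMethod' -> 'Class.testMethod'.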
    1.43 +  return '.'.join(test_name.replace('#', '.').split('.')[-2:])
    1.44 +
    1.45 +
    1.46 +def FilterTests(test_names, pattern_list, inclusive):
    1.47 +  """Filters |test_names| using a list of patterns.
    1.48 +
    1.49 +  Args:
    1.50 +    test_names: A list of test names.
    1.51 +    pattern_list: A list of patterns.
     1.52 +    inclusive: If True, returns the tests that match any pattern. If False,
    1.53 +               returns the tests that do not match any pattern.
    1.54 +  Returns:
    1.55 +    A list of test names.
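          +
          +  Example (illustrative names):
          +    FilterTests(['a.b.Class#testFoo'], ['Class.test*'], True) returns
          +    ['a.b.Class#testFoo'].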
    1.56 +  """
    1.57 +  ret = []
    1.58 +  for t in test_names:
    1.59 +    has_match = False
    1.60 +    for pattern in pattern_list:
    1.61 +      has_match = has_match or fnmatch.fnmatch(_TestNameToExpectation(t),
    1.62 +                                               pattern)
    1.63 +    if has_match == inclusive:
    1.64 +      ret += [t]
    1.65 +  return ret
    1.66 +
    1.67 +
    1.68 +class TestRunner(BaseTestRunner):
    1.69 +  """Responsible for running a series of tests connected to a single device."""
    1.70 +
    1.71 +  _DEVICE_DATA_DIR = 'chrome/test/data'
    1.72 +  _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''),
    1.73 +                           'external/emma/lib/emma.jar')
    1.74 +  _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es'
    1.75 +  _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR')
    1.76 +  _COVERAGE_FILENAME = 'coverage.ec'
    1.77 +  _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' +
    1.78 +                           _COVERAGE_FILENAME)
    1.79 +  _COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP',
    1.80 +                                                         ''),
    1.81 +                                          'out/target/common/obj/APPS',
    1.82 +                                          'Chrome_intermediates/coverage.em')
    1.83 +  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
    1.84 +  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
    1.85 +                                       '/chrome-profile*')
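          +  # Maps a device serial to whether test files have already been pushed
          +  # to it (see CopyTestFilesOnce); shared across TestRunner instances.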
    1.86 +  _DEVICE_HAS_TEST_FILES = {}
    1.87 +
    1.88 +  def __init__(self, options, device, tests_iter, coverage, shard_index, apks,
    1.89 +               ports_to_forward):
    1.90 +    """Create a new TestRunner.
    1.91 +
    1.92 +    Args:
    1.93 +      options: An options object with the following required attributes:
    1.94 +      -  build_type: 'Release' or 'Debug'.
     1.95 +      -  install_apk: Whether to re-install the APKs before running.
     1.96 +      -  save_perf_json: Whether or not to save the JSON file from UI perf
     1.97 +            tests.
     1.98 +      -  screenshot_failures: Whether to take a screenshot on test failure.
     1.99 +      -  tool: Name of the Valgrind tool.
    1.100 +      -  wait_for_debugger: Blocks until the debugger is connected.
   1.101 +      device: Attached android device.
   1.102 +      tests_iter: A list of tests to be run.
    1.103 +      coverage: Whether to collect coverage information.
    1.104 +      shard_index: Shard index for this TestRunner, used to create unique
    1.105 +          port numbers.
    1.106 +      apks: A list of ApkInfo objects to be installed. The first element
    1.107 +            must be the tests APK; the rest are APKs used by the tests.
    1.108 +            The default is ChromeTest.apk.
   1.109 +      ports_to_forward: A list of port numbers for which to set up forwarders.
   1.110 +                        Can be optionally requested by a test case.
   1.111 +    Raises:
   1.112 +      FatalTestException: if coverage metadata is not available.
   1.113 +    """
   1.114 +    BaseTestRunner.__init__(
   1.115 +        self, device, options.tool, shard_index, options.build_type)
   1.116 +
   1.117 +    if not apks:
   1.118 +      apks = [apk_info.ApkInfo(options.test_apk_path,
   1.119 +                               options.test_apk_jar_path)]
   1.120 +
   1.121 +    self.build_type = options.build_type
   1.122 +    self.install_apk = options.install_apk
   1.123 +    self.save_perf_json = options.save_perf_json
   1.124 +    self.screenshot_failures = options.screenshot_failures
   1.125 +    self.wait_for_debugger = options.wait_for_debugger
   1.126 +
   1.127 +    self.tests_iter = tests_iter
   1.128 +    self.coverage = coverage
   1.129 +    self.apks = apks
   1.130 +    self.test_apk = apks[0]
   1.131 +    self.instrumentation_class_path = self.test_apk.GetPackageName()
   1.132 +    self.ports_to_forward = ports_to_forward
   1.133 +
   1.134 +    self.test_results = TestResults()
   1.135 +    self.forwarder = None
   1.136 +
   1.137 +    if self.coverage:
   1.138 +      if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
   1.139 +        os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
   1.140 +      if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
   1.141 +        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
   1.142 +                                 ' : Coverage meta info [' +
   1.143 +                                 TestRunner._COVERAGE_META_INFO_PATH +
   1.144 +                                 '] does not exist.')
   1.145 +      if (not TestRunner._COVERAGE_WEB_ROOT_DIR or
   1.146 +          not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
   1.147 +        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
   1.148 +                                 ' : Path specified in $EMMA_WEB_ROOTDIR [' +
   1.149 +                                 TestRunner._COVERAGE_WEB_ROOT_DIR +
   1.150 +                                 '] does not exist.')
   1.151 +
   1.152 +  def _GetTestsIter(self):
   1.153 +    if not self.tests_iter:
    1.154 +      # A multiprocessing.Queue can't be pickled across processes if it is
    1.155 +      # set as a member in the constructor.  Grab one here instead.
    1.156 +      self.tests_iter = BaseTestSharder.tests_container
   1.157 +    assert self.tests_iter
   1.158 +    return self.tests_iter
   1.159 +
   1.160 +  def CopyTestFilesOnce(self):
   1.161 +    """Pushes the test data files to the device. Installs the apk if opted."""
   1.162 +    if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
   1.163 +      logging.warning('Already copied test files to device %s, skipping.',
   1.164 +                      self.device)
   1.165 +      return
   1.166 +    host_test_files = [
   1.167 +        ('android_webview/test/data/device_files', 'webview'),
   1.168 +        ('content/test/data/android/device_files', 'content'),
   1.169 +        ('chrome/test/data/android/device_files', 'chrome')
   1.170 +    ]
   1.171 +    for (host_src, dst_layer) in host_test_files:
   1.172 +      host_test_files_path = constants.CHROME_DIR + '/' + host_src
   1.173 +      if os.path.exists(host_test_files_path):
   1.174 +        self.adb.PushIfNeeded(host_test_files_path,
   1.175 +                              self.adb.GetExternalStorage() + '/' +
   1.176 +                              TestRunner._DEVICE_DATA_DIR + '/' + dst_layer)
   1.177 +    if self.install_apk:
   1.178 +      for apk in self.apks:
   1.179 +        self.adb.ManagedInstall(apk.GetApkPath(),
   1.180 +                                package_name=apk.GetPackageName())
   1.181 +    self.tool.CopyFiles()
   1.182 +    TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True
   1.183 +
   1.184 +  def SaveCoverageData(self, test):
   1.185 +    """Saves the Emma coverage data before it's overwritten by the next test.
   1.186 +
   1.187 +    Args:
   1.188 +      test: the test whose coverage data is collected.
   1.189 +    """
   1.190 +    if not self.coverage:
   1.191 +      return
   1.192 +    if not self.adb.Adb().Pull(TestRunner._COVERAGE_RESULT_PATH,
   1.193 +                               constants.CHROME_DIR):
   1.194 +      logging.error('ERROR: Unable to find file ' +
   1.195 +                    TestRunner._COVERAGE_RESULT_PATH +
   1.196 +                    ' on the device for test ' + test)
   1.197 +    pulled_coverage_file = os.path.join(constants.CHROME_DIR,
   1.198 +                                        TestRunner._COVERAGE_FILENAME)
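          +    # Fold this test's coverage into the running merged file; on the first
          +    # test there is nothing to merge yet, so just copy the pulled file.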
   1.199 +    if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
   1.200 +      cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'merge',
   1.201 +             '-in', pulled_coverage_file,
   1.202 +             '-in', TestRunner._COVERAGE_MERGED_FILENAME,
   1.203 +             '-out', TestRunner._COVERAGE_MERGED_FILENAME]
   1.204 +      cmd_helper.RunCmd(cmd)
   1.205 +    else:
   1.206 +      shutil.copy(pulled_coverage_file,
   1.207 +                  TestRunner._COVERAGE_MERGED_FILENAME)
   1.208 +    os.remove(pulled_coverage_file)
   1.209 +
   1.210 +  def GenerateCoverageReportIfNeeded(self):
   1.211 +    """Uses the Emma to generate a coverage report and a html page."""
   1.212 +    if not self.coverage:
   1.213 +      return
   1.214 +    cmd = ['java', '-classpath', TestRunner._EMMA_JAR,
   1.215 +           'emma', 'report', '-r', 'html',
   1.216 +           '-in', TestRunner._COVERAGE_MERGED_FILENAME,
   1.217 +           '-in', TestRunner._COVERAGE_META_INFO_PATH]
   1.218 +    cmd_helper.RunCmd(cmd)
   1.219 +    new_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
   1.220 +                           time.strftime('Coverage_for_%Y_%m_%d_%a_%H:%M'))
   1.221 +    shutil.copytree('coverage', new_dir)
   1.222 +
   1.223 +    latest_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
   1.224 +                              'Latest_Coverage_Run')
   1.225 +    if os.path.exists(latest_dir):
   1.226 +      shutil.rmtree(latest_dir)
   1.227 +    os.mkdir(latest_dir)
   1.228 +    webserver_new_index = os.path.join(new_dir, 'index.html')
   1.229 +    webserver_new_files = os.path.join(new_dir, '_files')
   1.230 +    webserver_latest_index = os.path.join(latest_dir, 'index.html')
   1.231 +    webserver_latest_files = os.path.join(latest_dir, '_files')
    1.232 +    # Set up new symlinks pointing at the latest result.
   1.233 +    os.symlink(webserver_new_index, webserver_latest_index)
   1.234 +    os.symlink(webserver_new_files, webserver_latest_files)
   1.235 +    cmd_helper.RunCmd(['chmod', '755', '-R', latest_dir, new_dir])
   1.236 +
   1.237 +  def _GetInstrumentationArgs(self):
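          +    # Key/value pairs handed to StartInstrumentation(); presumably passed
          +    # to the instrumentation as 'am instrument -e <key> <value>' extras.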
   1.238 +    ret = {}
   1.239 +    if self.coverage:
   1.240 +      ret['coverage'] = 'true'
   1.241 +    if self.wait_for_debugger:
   1.242 +      ret['debug'] = 'true'
   1.243 +    return ret
   1.244 +
   1.245 +  def _TakeScreenshot(self, test):
   1.246 +    """Takes a screenshot from the device."""
   1.247 +    screenshot_tool = os.path.join(constants.CHROME_DIR,
   1.248 +        'third_party/android_tools/sdk/tools/monkeyrunner')
   1.249 +    screenshot_script = os.path.join(constants.CHROME_DIR,
   1.250 +        'build/android/monkeyrunner_screenshot.py')
   1.251 +    screenshot_path = os.path.join(constants.CHROME_DIR,
   1.252 +                                   'out_screenshots')
   1.253 +    if not os.path.exists(screenshot_path):
   1.254 +      os.mkdir(screenshot_path)
   1.255 +    screenshot_name = os.path.join(screenshot_path, test + '.png')
   1.256 +    logging.info('Taking screenshot named %s', screenshot_name)
   1.257 +    cmd_helper.RunCmd([screenshot_tool, screenshot_script,
   1.258 +                       '--serial', self.device,
   1.259 +                       '--file', screenshot_name])
   1.260 +
   1.261 +  def SetUp(self):
   1.262 +    """Sets up the test harness and device before all tests are run."""
   1.263 +    super(TestRunner, self).SetUp()
   1.264 +    if not self.adb.IsRootEnabled():
    1.265 +      logging.warning('Unable to enable Java asserts for %s (non-rooted device).',
   1.266 +                      self.device)
   1.267 +    else:
   1.268 +      if self.adb.SetJavaAssertsEnabled(enable=True):
   1.269 +        self.adb.Reboot(full_reboot=False)
   1.270 +
    1.271 +    # Base the HTTP server's default port on the shard index: multiple
    1.272 +    # processes launching lighttpd on the same port at the same time would
    1.273 +    # otherwise race with each other.
   1.274 +    http_server_ports = self.LaunchTestHttpServer(
   1.275 +        os.path.join(constants.CHROME_DIR),
   1.276 +        (constants.LIGHTTPD_RANDOM_PORT_FIRST + self.shard_index))
   1.277 +    if self.ports_to_forward:
   1.278 +      port_pairs = [(port, port) for port in self.ports_to_forward]
   1.279 +      # We need to remember which ports the HTTP server is using, since the
   1.280 +      # forwarder will stomp on them otherwise.
   1.281 +      port_pairs.append(http_server_ports)
   1.282 +      self.forwarder = Forwarder(
   1.283 +         self.adb, port_pairs, self.tool, '127.0.0.1', self.build_type)
   1.284 +    self.CopyTestFilesOnce()
   1.285 +    self.flags.AddFlags(['--enable-test-intents'])
   1.286 +
   1.287 +  def TearDown(self):
   1.288 +    """Cleans up the test harness and saves outstanding data from test run."""
   1.289 +    if self.forwarder:
   1.290 +      self.forwarder.Close()
   1.291 +    self.GenerateCoverageReportIfNeeded()
   1.292 +    super(TestRunner, self).TearDown()
   1.293 +
   1.294 +  def TestSetup(self, test):
   1.295 +    """Sets up the test harness for running a particular test.
   1.296 +
   1.297 +    Args:
   1.298 +      test: The name of the test that will be run.
   1.299 +    """
   1.300 +    self.SetupPerfMonitoringIfNeeded(test)
   1.301 +    self._SetupIndividualTestTimeoutScale(test)
   1.302 +    self.tool.SetupEnvironment()
   1.303 +
   1.304 +    # Make sure the forwarder is still running.
   1.305 +    self.RestartHttpServerForwarderIfNecessary()
   1.306 +
   1.307 +  def _IsPerfTest(self, test):
   1.308 +    """Determines whether a test is a performance test.
   1.309 +
   1.310 +    Args:
   1.311 +      test: The name of the test to be checked.
   1.312 +
   1.313 +    Returns:
   1.314 +      Whether the test is annotated as a performance test.
   1.315 +    """
   1.316 +    return _PERF_TEST_ANNOTATION in self.test_apk.GetTestAnnotations(test)
   1.317 +
   1.318 +  def SetupPerfMonitoringIfNeeded(self, test):
   1.319 +    """Sets up performance monitoring if the specified test requires it.
   1.320 +
   1.321 +    Args:
   1.322 +      test: The name of the test to be run.
   1.323 +    """
   1.324 +    if not self._IsPerfTest(test):
   1.325 +      return
   1.326 +    self.adb.Adb().SendCommand('shell rm ' +
   1.327 +                               TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
   1.328 +    self.adb.StartMonitoringLogcat()
   1.329 +
   1.330 +  def TestTeardown(self, test, test_result):
   1.331 +    """Cleans up the test harness after running a particular test.
   1.332 +
    1.333 +    Depending on the options of this TestRunner, this might handle coverage
    1.334 +    tracking or performance tracking.  Those teardown steps only run if the
    1.335 +    test passed.
   1.336 +
   1.337 +    Args:
   1.338 +      test: The name of the test that was just run.
   1.339 +      test_result: result for this test.
   1.340 +    """
   1.341 +
   1.342 +    self.tool.CleanUpEnvironment()
   1.343 +
   1.344 +    # The logic below relies on the test passing.
   1.345 +    if not test_result or test_result.GetStatusCode():
   1.346 +      return
   1.347 +
   1.348 +    self.TearDownPerfMonitoring(test)
   1.349 +    self.SaveCoverageData(test)
   1.350 +
   1.351 +  def TearDownPerfMonitoring(self, test):
   1.352 +    """Cleans up performance monitoring if the specified test required it.
   1.353 +
   1.354 +    Args:
   1.355 +      test: The name of the test that was just run.
   1.356 +    Raises:
   1.357 +      FatalTestException: if there's anything wrong with the perf data.
   1.358 +    """
   1.359 +    if not self._IsPerfTest(test):
   1.360 +      return
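          +    # A test name looks like 'package.Class#testMethod'; keep the method.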
   1.361 +    raw_test_name = test.split('#')[1]
   1.362 +
    1.363 +    # Wait and grab the annotation data to figure out which traces to parse.
    1.364 +    regex = self.adb.WaitForLogMatch(re.compile(r'\*\*PERFANNOTATION\(' +
    1.365 +                                                raw_test_name +
    1.366 +                                                r'\):(.*)'), None)
   1.367 +
    1.368 +    # If the test is set to run only on a specific device type (e.g. only
    1.369 +    # tablet or only phone) and it is run on the wrong device, the test
    1.370 +    # simply quits without doing anything.  The Java test harness will
    1.371 +    # still print the appropriate annotation for us, but adds --NORUN--
    1.372 +    # so we know to ignore the results.  The --NORUN-- tag is managed by
    1.373 +    # MainActivityTestBase.java.
   1.374 +    if regex.group(1) != '--NORUN--':
   1.375 +
   1.376 +      # Obtain the relevant perf data.  The data is dumped to a
   1.377 +      # JSON formatted file.
   1.378 +      json_string = self.adb.GetFileContents(
   1.379 +          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')
   1.380 +
   1.381 +      if json_string:
   1.382 +        json_string = '\n'.join(json_string)
   1.383 +      else:
   1.384 +        raise FatalTestException('Perf file does not exist or is empty')
   1.385 +
   1.386 +      if self.save_perf_json:
   1.387 +        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
   1.388 +        with open(json_local_file, 'w') as f:
   1.389 +          f.write(json_string)
   1.390 +        logging.info('Saving Perf UI JSON from test ' +
   1.391 +                     test + ' to ' + json_local_file)
   1.392 +
   1.393 +      raw_perf_data = regex.group(1).split(';')
   1.394 +
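          +      # Each annotation entry carries three comma-separated tokens: the
          +      # name of the run inside the JSON perf data, followed by the two
          +      # labels passed to PrintPerfResult below.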
   1.395 +      for raw_perf_set in raw_perf_data:
   1.396 +        if raw_perf_set:
   1.397 +          perf_set = raw_perf_set.split(',')
   1.398 +          if len(perf_set) != 3:
   1.399 +            raise FatalTestException('Unexpected number of tokens in '
   1.400 +                                     'perf annotation string: ' + raw_perf_set)
   1.401 +
   1.402 +          # Process the performance data
   1.403 +          result = GetAverageRunInfoFromJSONString(json_string, perf_set[0])
   1.404 +
   1.405 +          PrintPerfResult(perf_set[1], perf_set[2],
   1.406 +                          [result['average']], result['units'])
   1.407 +
   1.408 +  def _SetupIndividualTestTimeoutScale(self, test):
   1.409 +    timeout_scale = self._GetIndividualTestTimeoutScale(test)
   1.410 +    valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale)
   1.411 +
   1.412 +  def _GetIndividualTestTimeoutScale(self, test):
   1.413 +    """Returns the timeout scale for the given |test|."""
   1.414 +    annotations = self.apks[0].GetTestAnnotations(test)
   1.415 +    timeout_scale = 1
   1.416 +    if 'TimeoutScale' in annotations:
   1.417 +      for annotation in annotations:
   1.418 +        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
   1.419 +        if scale_match:
   1.420 +          timeout_scale = int(scale_match.group(1))
   1.421 +    if self.wait_for_debugger:
   1.422 +      timeout_scale *= 100
   1.423 +    return timeout_scale
   1.424 +
   1.425 +  def _GetIndividualTestTimeoutSecs(self, test):
   1.426 +    """Returns the timeout in seconds for the given |test|."""
   1.427 +    annotations = self.apks[0].GetTestAnnotations(test)
   1.428 +    if 'Manual' in annotations:
   1.429 +      return 600 * 60
   1.430 +    if 'External' in annotations:
   1.431 +      return 10 * 60
   1.432 +    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
   1.433 +      return 5 * 60
   1.434 +    if 'MediumTest' in annotations:
   1.435 +      return 3 * 60
   1.436 +    return 1 * 60
   1.437 +
   1.438 +  def RunTests(self):
   1.439 +    """Runs the tests, generating the coverage if needed.
   1.440 +
   1.441 +    Returns:
   1.442 +      A TestResults object.
   1.443 +    """
   1.444 +    instrumentation_path = (self.instrumentation_class_path +
   1.445 +                            '/android.test.InstrumentationTestRunner')
   1.446 +    instrumentation_args = self._GetInstrumentationArgs()
   1.447 +    for test in self._GetTestsIter():
   1.448 +      test_result = None
   1.449 +      start_date_ms = None
   1.450 +      try:
   1.451 +        self.TestSetup(test)
    1.452 +        start_date_ms = int(time.time() * 1000)
   1.453 +        args_with_filter = dict(instrumentation_args)
   1.454 +        args_with_filter['class'] = test
   1.455 +        # |test_results| is a list that should contain
   1.456 +        # a single TestResult object.
    1.457 +        logging.info('Instrumentation args: %s', args_with_filter)
   1.458 +        (test_results, _) = self.adb.Adb().StartInstrumentation(
   1.459 +            instrumentation_path=instrumentation_path,
   1.460 +            instrumentation_args=args_with_filter,
   1.461 +            timeout_time=(self._GetIndividualTestTimeoutSecs(test) *
   1.462 +                          self._GetIndividualTestTimeoutScale(test) *
   1.463 +                          self.tool.GetTimeoutScale()))
    1.464 +        duration_ms = int(time.time() * 1000) - start_date_ms
   1.465 +        assert len(test_results) == 1
   1.466 +        test_result = test_results[0]
   1.467 +        status_code = test_result.GetStatusCode()
   1.468 +        if status_code:
   1.469 +          log = test_result.GetFailureReason()
   1.470 +          if not log:
   1.471 +            log = 'No information.'
   1.472 +          if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0:
   1.473 +            self._TakeScreenshot(test)
   1.474 +          self.test_results.failed += [SingleTestResult(test, start_date_ms,
   1.475 +                                                        duration_ms, log)]
   1.476 +        else:
   1.477 +          result = [SingleTestResult(test, start_date_ms, duration_ms)]
   1.478 +          self.test_results.ok += result
   1.479 +      # Catch exceptions thrown by StartInstrumentation().
   1.480 +      # See ../../third_party/android/testrunner/adb_interface.py
   1.481 +      except (errors.WaitForResponseTimedOutError,
   1.482 +              errors.DeviceUnresponsiveError,
   1.483 +              errors.InstrumentationError), e:
   1.484 +        if start_date_ms:
    1.485 +          duration_ms = int(time.time() * 1000) - start_date_ms
   1.486 +        else:
    1.487 +          start_date_ms = int(time.time() * 1000)
   1.488 +          duration_ms = 0
   1.489 +        message = str(e)
   1.490 +        if not message:
   1.491 +          message = 'No information.'
   1.492 +        self.test_results.crashed += [SingleTestResult(test, start_date_ms,
   1.493 +                                                       duration_ms,
   1.494 +                                                       message)]
   1.495 +        test_result = None
   1.496 +      self.TestTeardown(test, test_result)
   1.497 +    return self.test_results
   1.498 +
   1.499 +
   1.500 +class TestSharder(BaseTestSharder):
   1.501 +  """Responsible for sharding the tests on the connected devices."""
   1.502 +
   1.503 +  def __init__(self, attached_devices, options, tests, apks):
   1.504 +    BaseTestSharder.__init__(self, attached_devices)
   1.505 +    self.options = options
   1.506 +    self.tests = tests
   1.507 +    self.apks = apks
   1.508 +
   1.509 +  def SetupSharding(self, tests):
   1.510 +    """Called before starting the shards."""
   1.511 +    SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
   1.512 +        len(self.attached_devices), tests))
   1.513 +
   1.514 +  def CreateShardedTestRunner(self, device, index):
   1.515 +    """Creates a sharded test runner.
   1.516 +
   1.517 +    Args:
   1.518 +      device: Device serial where this shard will run.
   1.519 +      index: Index of this device in the pool.
   1.520 +
   1.521 +    Returns:
   1.522 +      A TestRunner object.
   1.523 +    """
   1.524 +    return TestRunner(self.options, device, None, False, index, self.apks, [])
   1.525 +
   1.526 +
   1.527 +def DispatchJavaTests(options, apks):
   1.528 +  """Dispatches Java tests onto connected device(s).
   1.529 +
    1.530 +  If possible, this method shards the tests across all connected devices.
    1.531 +  Otherwise, it dispatches and runs the tests on a single device.
   1.532 +
   1.533 +  Args:
   1.534 +    options: Command line options.
   1.535 +    apks: list of APKs to use.
   1.536 +
   1.537 +  Returns:
   1.538 +    A TestResults object holding the results of the Java tests.
   1.539 +
   1.540 +  Raises:
    1.541 +    FatalTestException: When no devices are attached.
   1.542 +  """
   1.543 +  test_apk = apks[0]
   1.544 +  if options.annotation:
   1.545 +    available_tests = test_apk.GetAnnotatedTests(options.annotation)
   1.546 +    if len(options.annotation) == 1 and options.annotation[0] == 'SmallTest':
   1.547 +      tests_without_annotation = [
   1.548 +          m for m in
   1.549 +          test_apk.GetTestMethods()
   1.550 +          if not test_apk.GetTestAnnotations(m) and
   1.551 +          not apk_info.ApkInfo.IsPythonDrivenTest(m)]
   1.552 +      if tests_without_annotation:
   1.553 +        tests_without_annotation.sort()
   1.554 +        logging.warning('The following tests do not contain any annotation. '
   1.555 +                        'Assuming "SmallTest":\n%s',
   1.556 +                        '\n'.join(tests_without_annotation))
   1.557 +        available_tests += tests_without_annotation
   1.558 +  else:
   1.559 +    available_tests = [m for m in test_apk.GetTestMethods()
   1.560 +                       if not apk_info.ApkInfo.IsPythonDrivenTest(m)]
   1.561 +  coverage = os.environ.get('EMMA_INSTRUMENT') == 'true'
   1.562 +
   1.563 +  tests = []
   1.564 +  if options.test_filter:
   1.565 +    # |available_tests| are in adb instrument format: package.path.class#test.
   1.566 +    filter_without_hash = options.test_filter.replace('#', '.')
   1.567 +    tests = [t for t in available_tests
   1.568 +             if filter_without_hash in t.replace('#', '.')]
   1.569 +  else:
   1.570 +    tests = available_tests
   1.571 +
   1.572 +  if not tests:
   1.573 +    logging.warning('No Java tests to run with current args.')
   1.574 +    return TestResults()
   1.575 +
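          +  # Repeat the whole test list once per requested run.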
   1.576 +  tests *= options.number_of_runs
   1.577 +
   1.578 +  attached_devices = android_commands.GetAttachedDevices()
   1.579 +  test_results = TestResults()
   1.580 +
   1.581 +  if not attached_devices:
   1.582 +    raise FatalTestException('You have no devices attached or visible!')
   1.583 +  if options.device:
   1.584 +    attached_devices = [options.device]
   1.585 +
   1.586 +  logging.info('Will run: %s', str(tests))
   1.587 +
   1.588 +  if len(attached_devices) > 1 and (coverage or options.wait_for_debugger):
    1.589 +    logging.warning('Coverage / debugger cannot be sharded; '
    1.590 +                    'using the first available device')
   1.591 +    attached_devices = attached_devices[:1]
   1.592 +  sharder = TestSharder(attached_devices, options, tests, apks)
   1.593 +  test_results = sharder.RunShardedTests()
   1.594 +  return test_results
