Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs the Java tests. See run_instrumentation_tests.py for more information."""

import fnmatch
import logging
import os
import re
import shutil
import sys
import time

import android_commands
import apk_info
from base_test_runner import BaseTestRunner
from base_test_sharder import BaseTestSharder, SetTestsContainer
import cmd_helper
import constants
import errors
from forwarder import Forwarder
from json_perf_parser import GetAverageRunInfoFromJSONString
from perf_tests_helper import PrintPerfResult
import sharded_tests_queue
from test_result import SingleTestResult, TestResults
import valgrind_tools

_PERF_TEST_ANNOTATION = 'PerfTest'


class FatalTestException(Exception):
  """A fatal test exception."""
  pass


def _TestNameToExpectation(test_name):
  # A test name is a Package.Path.Class#testName; convert to what we use in
  # the expectation file.
  return '.'.join(test_name.replace('#', '.').split('.')[-2:])
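# A quick illustration of the conversion above (hypothetical test name, not
# taken from any real suite):
#   _TestNameToExpectation('org.chromium.sample.FooTest#testBar')
#   returns 'FooTest.testBar'.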


def FilterTests(test_names, pattern_list, inclusive):
  """Filters |test_names| using a list of patterns.

  Args:
    test_names: A list of test names.
    pattern_list: A list of patterns.
    inclusive: If True, returns the tests that match any pattern. If False,
      returns the tests that do not match any pattern.
  Returns:
    A list of test names.
  """
  ret = []
  for t in test_names:
    has_match = False
    for pattern in pattern_list:
      has_match = has_match or fnmatch.fnmatch(_TestNameToExpectation(t),
                                               pattern)
    if has_match == inclusive:
      ret += [t]
  return ret
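# A short usage sketch for FilterTests (hypothetical names; the patterns are
# the fnmatch-style expressions used above):
#   tests = ['a.b.FooTest#testOne', 'a.b.BarTest#testTwo']
#   FilterTests(tests, ['FooTest.*'], inclusive=True)   # -> FooTest entry only
#   FilterTests(tests, ['FooTest.*'], inclusive=False)  # -> BarTest entry only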


class TestRunner(BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_DATA_DIR = 'chrome/test/data'
  _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''),
                           'external/emma/lib/emma.jar')
  _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es'
  _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR')
  _COVERAGE_FILENAME = 'coverage.ec'
  _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' +
                           _COVERAGE_FILENAME)
  _COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP',
                                                         ''),
                                          'out/target/common/obj/APPS',
                                          'Chrome_intermediates/coverage.em')
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
  _DEVICE_HAS_TEST_FILES = {}

  def __init__(self, options, device, tests_iter, coverage, shard_index, apks,
               ports_to_forward):
    """Create a new TestRunner.

    Args:
      options: An options object with the following required attributes:
        - build_type: 'Release' or 'Debug'.
        - install_apk: Re-installs the APKs if set.
        - save_perf_json: Whether or not to save the JSON file from UI perf
          tests.
        - screenshot_failures: Takes a screenshot on test failure.
        - tool: Name of the Valgrind tool.
        - wait_for_debugger: Blocks until the debugger is connected.
      device: Attached android device.
      tests_iter: A list of tests to be run.
      coverage: Whether to collect coverage information.
      shard_index: Shard # for this TestRunner, used to create unique port
        numbers.
      apks: A list of ApkInfo objects to be installed. The first element
        should be the tests APK; the rest are APKs used by the tests.
        The default is ChromeTest.apk.
      ports_to_forward: A list of port numbers for which to set up forwarders.
        Can be optionally requested by a test case.
    Raises:
      FatalTestException: if coverage metadata is not available.
    """
    BaseTestRunner.__init__(
        self, device, options.tool, shard_index, options.build_type)

    if not apks:
      apks = [apk_info.ApkInfo(options.test_apk_path,
                               options.test_apk_jar_path)]

    self.build_type = options.build_type
    self.install_apk = options.install_apk
    self.save_perf_json = options.save_perf_json
    self.screenshot_failures = options.screenshot_failures
    self.wait_for_debugger = options.wait_for_debugger

    self.tests_iter = tests_iter
    self.coverage = coverage
    self.apks = apks
    self.test_apk = apks[0]
    self.instrumentation_class_path = self.test_apk.GetPackageName()
    self.ports_to_forward = ports_to_forward

    self.test_results = TestResults()
    self.forwarder = None

    if self.coverage:
      if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
        os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
      if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Coverage meta info [' +
                                 TestRunner._COVERAGE_META_INFO_PATH +
                                 '] does not exist.')
      if (not TestRunner._COVERAGE_WEB_ROOT_DIR or
          not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Path specified in $EMMA_WEB_ROOTDIR [' +
                                 TestRunner._COVERAGE_WEB_ROOT_DIR +
                                 '] does not exist.')

  def _GetTestsIter(self):
    if not self.tests_iter:
      # A multiprocessing.Queue can't be pickled across processes if it is set
      # as a member in the constructor, so grab it from the sharder here.
      self.tests_iter = (BaseTestSharder.tests_container)
    assert self.tests_iter
    return self.tests_iter

  def CopyTestFilesOnce(self):
    """Pushes test data files to the device and installs the APK if needed."""
    if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
      logging.warning('Already copied test files to device %s, skipping.',
                      self.device)
      return
    host_test_files = [
        ('android_webview/test/data/device_files', 'webview'),
        ('content/test/data/android/device_files', 'content'),
        ('chrome/test/data/android/device_files', 'chrome')
    ]
    for (host_src, dst_layer) in host_test_files:
      host_test_files_path = constants.CHROME_DIR + '/' + host_src
      if os.path.exists(host_test_files_path):
        self.adb.PushIfNeeded(host_test_files_path,
                              self.adb.GetExternalStorage() + '/' +
                              TestRunner._DEVICE_DATA_DIR + '/' + dst_layer)
    if self.install_apk:
      for apk in self.apks:
        self.adb.ManagedInstall(apk.GetApkPath(),
                                package_name=apk.GetPackageName())
    self.tool.CopyFiles()
    TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True

  def SaveCoverageData(self, test):
    """Saves the Emma coverage data before it's overwritten by the next test.

    Args:
      test: the test whose coverage data is collected.
    """
    if not self.coverage:
      return
    if not self.adb.Adb().Pull(TestRunner._COVERAGE_RESULT_PATH,
                               constants.CHROME_DIR):
      logging.error('ERROR: Unable to find file ' +
                    TestRunner._COVERAGE_RESULT_PATH +
                    ' on the device for test ' + test)
    pulled_coverage_file = os.path.join(constants.CHROME_DIR,
                                        TestRunner._COVERAGE_FILENAME)
    if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
      cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'merge',
             '-in', pulled_coverage_file,
             '-in', TestRunner._COVERAGE_MERGED_FILENAME,
             '-out', TestRunner._COVERAGE_MERGED_FILENAME]
      cmd_helper.RunCmd(cmd)
    else:
      shutil.copy(pulled_coverage_file,
                  TestRunner._COVERAGE_MERGED_FILENAME)
    os.remove(pulled_coverage_file)
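  # The merge branch above is roughly equivalent to running the following
  # command by hand (paths shortened for illustration):
  #   java -classpath external/emma/lib/emma.jar emma merge \
  #       -in coverage.ec -in unittest_coverage.es -out unittest_coverage.es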

  def GenerateCoverageReportIfNeeded(self):
    """Uses Emma to generate a coverage report and an HTML page."""
    if not self.coverage:
      return
    cmd = ['java', '-classpath', TestRunner._EMMA_JAR,
           'emma', 'report', '-r', 'html',
           '-in', TestRunner._COVERAGE_MERGED_FILENAME,
           '-in', TestRunner._COVERAGE_META_INFO_PATH]
    cmd_helper.RunCmd(cmd)
    new_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
                           time.strftime('Coverage_for_%Y_%m_%d_%a_%H:%M'))
    shutil.copytree('coverage', new_dir)

    latest_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
                              'Latest_Coverage_Run')
    if os.path.exists(latest_dir):
      shutil.rmtree(latest_dir)
    os.mkdir(latest_dir)
    webserver_new_index = os.path.join(new_dir, 'index.html')
    webserver_new_files = os.path.join(new_dir, '_files')
    webserver_latest_index = os.path.join(latest_dir, 'index.html')
    webserver_latest_files = os.path.join(latest_dir, '_files')
    # Set up new symlinks pointing to the latest results.
    os.symlink(webserver_new_index, webserver_latest_index)
    os.symlink(webserver_new_files, webserver_latest_files)
    cmd_helper.RunCmd(['chmod', '755', '-R', latest_dir, new_dir])

  def _GetInstrumentationArgs(self):
    ret = {}
    if self.coverage:
      ret['coverage'] = 'true'
    if self.wait_for_debugger:
      ret['debug'] = 'true'
    return ret

  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_tool = os.path.join(constants.CHROME_DIR,
        'third_party/android_tools/sdk/tools/monkeyrunner')
    screenshot_script = os.path.join(constants.CHROME_DIR,
        'build/android/monkeyrunner_screenshot.py')
    screenshot_path = os.path.join(constants.CHROME_DIR,
                                   'out_screenshots')
    if not os.path.exists(screenshot_path):
      os.mkdir(screenshot_path)
    screenshot_name = os.path.join(screenshot_path, test + '.png')
    logging.info('Taking screenshot named %s', screenshot_name)
    cmd_helper.RunCmd([screenshot_tool, screenshot_script,
                       '--serial', self.device,
                       '--file', screenshot_name])

  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.adb.IsRootEnabled():
      logging.warning('Unable to enable java asserts for %s, non-rooted device',
                      self.device)
    else:
      if self.adb.SetJavaAssertsEnabled(enable=True):
        self.adb.Reboot(full_reboot=False)

    # Use a different default port for the HTTP server on each shard, since
    # multiple processes racing to launch lighttpd on the same port at the
    # same time can collide.
    http_server_ports = self.LaunchTestHttpServer(
        os.path.join(constants.CHROME_DIR),
        (constants.LIGHTTPD_RANDOM_PORT_FIRST + self.shard_index))
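    # For illustration: if LIGHTTPD_RANDOM_PORT_FIRST were 8001 (a made-up
    # value), shard 0 would start from port 8001, shard 1 from 8002, and so
    # on, so concurrent shards do not fight over the same port.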
    if self.ports_to_forward:
      port_pairs = [(port, port) for port in self.ports_to_forward]
      # We need to remember which ports the HTTP server is using, since the
      # forwarder will stomp on them otherwise.
      port_pairs.append(http_server_ports)
      self.forwarder = Forwarder(
          self.adb, port_pairs, self.tool, '127.0.0.1', self.build_type)
    self.CopyTestFilesOnce()
    self.flags.AddFlags(['--enable-test-intents'])

  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from test run."""
    if self.forwarder:
      self.forwarder.Close()
    self.GenerateCoverageReportIfNeeded()
    super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    # Make sure the forwarder is still running.
    self.RestartHttpServerForwarderIfNecessary()

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_apk.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    self.adb.Adb().SendCommand('shell rm ' +
                               TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
    self.adb.StartMonitoringLogcat()

  def TestTeardown(self, test, test_result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner this might handle coverage
    tracking or performance tracking. This method will only be called if the
    test passed.

    Args:
      test: The name of the test that was just run.
      test_result: result for this test.
    """

    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not test_result or test_result.GetStatusCode():
      return

    self.TearDownPerfMonitoring(test)
    self.SaveCoverageData(test)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      FatalTestException: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait and grab annotation data so we can figure out which traces to parse.
    regex = self.adb.WaitForLogMatch(re.compile(r'\*\*PERFANNOTATION\(' +
                                                raw_test_name +
                                                r'\)\:(.*)'), None)

    # If the test is set to run on a specific device type only (i.e. only
    # tablet or phone) and it is being run on the wrong device, the test
    # just quits and does not do anything. The Java test harness will still
    # print the appropriate annotation for us, but will add --NORUN-- for
    # us so we know to ignore the results.
    # The --NORUN-- tag is managed by MainActivityTestBase.java.
    if regex.group(1) != '--NORUN--':

      # Obtain the relevant perf data. The data is dumped to a
      # JSON-formatted file.
      json_string = self.adb.GetFileContents(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')

      if json_string:
        json_string = '\n'.join(json_string)
      else:
        raise FatalTestException('Perf file does not exist or is empty')

      if self.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test ' +
                     test + ' to ' + json_local_file)

      raw_perf_data = regex.group(1).split(';')

      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise FatalTestException('Unexpected number of tokens in '
                                     'perf annotation string: ' + raw_perf_set)

          # Process the performance data.
          result = GetAverageRunInfoFromJSONString(json_string, perf_set[0])

          PrintPerfResult(perf_set[1], perf_set[2],
                          [result['average']], result['units'])
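  # For reference, the matched logcat annotation above looks roughly like
  # this (hypothetical values):
  #   **PERFANNOTATION(testFoo):step1,graph1,trace1;step2,graph2,trace2
  # Each ';'-separated entry yields three ','-separated tokens: the key passed
  # to GetAverageRunInfoFromJSONString plus the two names forwarded to
  # PrintPerfResult.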

  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.apks[0].GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      for annotation in annotations:
        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
        if scale_match:
          timeout_scale = int(scale_match.group(1))
    if self.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale
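  # Example (hypothetical annotation value): if apk_info reports a
  # 'TimeoutScale:3' annotation for the test, timeout_scale becomes 3; with
  # wait_for_debugger set it becomes 300 (3 * 100).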

  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.apks[0].GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 600 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    return 1 * 60

  def RunTests(self):
    """Runs the tests, generating the coverage if needed.

    Returns:
      A TestResults object.
    """
    instrumentation_path = (self.instrumentation_class_path +
                            '/android.test.InstrumentationTestRunner')
    instrumentation_args = self._GetInstrumentationArgs()
    for test in self._GetTestsIter():
      test_result = None
      start_date_ms = None
      try:
        self.TestSetup(test)
        start_date_ms = int(time.time()) * 1000
        args_with_filter = dict(instrumentation_args)
        args_with_filter['class'] = test
        # |test_results| is a list that should contain
        # a single TestResult object.
        logging.warn(args_with_filter)
        (test_results, _) = self.adb.Adb().StartInstrumentation(
            instrumentation_path=instrumentation_path,
            instrumentation_args=args_with_filter,
            timeout_time=(self._GetIndividualTestTimeoutSecs(test) *
                          self._GetIndividualTestTimeoutScale(test) *
                          self.tool.GetTimeoutScale()))
        duration_ms = int(time.time()) * 1000 - start_date_ms
        assert len(test_results) == 1
        test_result = test_results[0]
        status_code = test_result.GetStatusCode()
        if status_code:
          log = test_result.GetFailureReason()
          if not log:
            log = 'No information.'
          if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0:
            self._TakeScreenshot(test)
          self.test_results.failed += [SingleTestResult(test, start_date_ms,
                                                        duration_ms, log)]
        else:
          result = [SingleTestResult(test, start_date_ms, duration_ms)]
          self.test_results.ok += result
      # Catch exceptions thrown by StartInstrumentation().
      # See ../../third_party/android/testrunner/adb_interface.py
      except (errors.WaitForResponseTimedOutError,
              errors.DeviceUnresponsiveError,
              errors.InstrumentationError), e:
        if start_date_ms:
          duration_ms = int(time.time()) * 1000 - start_date_ms
        else:
          start_date_ms = int(time.time()) * 1000
          duration_ms = 0
        message = str(e)
        if not message:
          message = 'No information.'
        self.test_results.crashed += [SingleTestResult(test, start_date_ms,
                                                       duration_ms,
                                                       message)]
        test_result = None
      self.TestTeardown(test, test_result)
    return self.test_results
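  # The StartInstrumentation() call above builds an "am instrument" run; a
  # rough sketch of the equivalent shell command (package and test names are
  # illustrative, and the coverage extra only appears when coverage is on):
  #   adb shell am instrument -w -e class a.b.FooTest#testBar -e coverage true \
  #       org.chromium.tests/android.test.InstrumentationTestRunner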


class TestSharder(BaseTestSharder):
  """Responsible for sharding the tests on the connected devices."""

  def __init__(self, attached_devices, options, tests, apks):
    BaseTestSharder.__init__(self, attached_devices)
    self.options = options
    self.tests = tests
    self.apks = apks

  def SetupSharding(self, tests):
    """Called before starting the shards."""
    SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
        len(self.attached_devices), tests))

  def CreateShardedTestRunner(self, device, index):
    """Creates a sharded test runner.

    Args:
      device: Device serial where this shard will run.
      index: Index of this device in the pool.

    Returns:
      A TestRunner object.
    """
    return TestRunner(self.options, device, None, False, index, self.apks, [])


def DispatchJavaTests(options, apks):
  """Dispatches Java tests onto connected device(s).

  If possible, this method will attempt to shard the tests across
  all connected devices. Otherwise, it dispatches and runs tests on one device.

  Args:
    options: Command line options.
    apks: list of APKs to use.

  Returns:
    A TestResults object holding the results of the Java tests.

  Raises:
    FatalTestException: when there are no attached devices.
  """
  test_apk = apks[0]
  if options.annotation:
    available_tests = test_apk.GetAnnotatedTests(options.annotation)
    if len(options.annotation) == 1 and options.annotation[0] == 'SmallTest':
      tests_without_annotation = [
          m for m in
          test_apk.GetTestMethods()
          if not test_apk.GetTestAnnotations(m) and
          not apk_info.ApkInfo.IsPythonDrivenTest(m)]
      if tests_without_annotation:
        tests_without_annotation.sort()
        logging.warning('The following tests do not contain any annotation. '
                        'Assuming "SmallTest":\n%s',
                        '\n'.join(tests_without_annotation))
        available_tests += tests_without_annotation
  else:
    available_tests = [m for m in test_apk.GetTestMethods()
                       if not apk_info.ApkInfo.IsPythonDrivenTest(m)]
  coverage = os.environ.get('EMMA_INSTRUMENT') == 'true'

  tests = []
  if options.test_filter:
    # |available_tests| are in adb instrument format: package.path.class#test.
    filter_without_hash = options.test_filter.replace('#', '.')
    tests = [t for t in available_tests
             if filter_without_hash in t.replace('#', '.')]
  else:
    tests = available_tests
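  # For example (hypothetical names): a test_filter of 'FooTest.testBar'
  # selects the instrumentation-style name a.b.FooTest#testBar, because both
  # sides are compared with '#' replaced by '.'.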

  if not tests:
    logging.warning('No Java tests to run with current args.')
    return TestResults()

  tests *= options.number_of_runs

  attached_devices = android_commands.GetAttachedDevices()
  test_results = TestResults()

  if not attached_devices:
    raise FatalTestException('You have no devices attached or visible!')
  if options.device:
    attached_devices = [options.device]

  logging.info('Will run: %s', str(tests))

  if len(attached_devices) > 1 and (coverage or options.wait_for_debugger):
    logging.warning('Coverage / debugger cannot be sharded, '
                    'using first available device')
    attached_devices = attached_devices[:1]
  sharder = TestSharder(attached_devices, options, tests, apks)
  test_results = sharder.RunShardedTests()
  return test_results