# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs the Java tests. See more information on run_instrumentation_tests.py."""

import fnmatch
import logging
import os
import re
import shutil
import sys
import time

import android_commands
import apk_info
from base_test_runner import BaseTestRunner
from base_test_sharder import BaseTestSharder, SetTestsContainer
import cmd_helper
import constants
import errors
from forwarder import Forwarder
from json_perf_parser import GetAverageRunInfoFromJSONString
from perf_tests_helper import PrintPerfResult
import sharded_tests_queue
from test_result import SingleTestResult, TestResults
import valgrind_tools

# Java annotation marking a test method as a performance test.
_PERF_TEST_ANNOTATION = 'PerfTest'


class FatalTestException(Exception):
  """A fatal test exception."""
  pass


def _TestNameToExpectation(test_name):
  # A test name is a Package.Path.Class#testName; convert to what we use in
  # the expectation file (the trailing Class.testName pair).
  return '.'.join(test_name.replace('#', '.').split('.')[-2:])


def FilterTests(test_names, pattern_list, inclusive):
  """Filters |test_names| using a list of patterns.

  Args:
    test_names: A list of test names.
    pattern_list: A list of fnmatch-style patterns, matched against the
        Class.testName form of each test (see _TestNameToExpectation).
    inclusive: If True, returns the tests that match any pattern. If False,
        returns the tests that do not match any pattern.

  Returns:
    A list of test names.
  """
  ret = []
  for t in test_names:
    has_match = False
    for pattern in pattern_list:
      has_match = has_match or fnmatch.fnmatch(_TestNameToExpectation(t),
                                               pattern)
    if has_match == inclusive:
      ret += [t]
  return ret


class TestRunner(BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  # Destination directory (relative to external storage) for pushed test data.
  _DEVICE_DATA_DIR = 'chrome/test/data'
  _EMMA_JAR = os.path.join(os.environ.get('ANDROID_BUILD_TOP', ''),
                           'external/emma/lib/emma.jar')
  _COVERAGE_MERGED_FILENAME = 'unittest_coverage.es'
  _COVERAGE_WEB_ROOT_DIR = os.environ.get('EMMA_WEB_ROOTDIR')
  _COVERAGE_FILENAME = 'coverage.ec'
  _COVERAGE_RESULT_PATH = ('/data/data/com.google.android.apps.chrome/files/' +
                           _COVERAGE_FILENAME)
  _COVERAGE_META_INFO_PATH = os.path.join(os.environ.get('ANDROID_BUILD_TOP',
                                                         ''),
                                          'out/target/common/obj/APPS',
                                          'Chrome_intermediates/coverage.em')
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
  # Class-level cache: device serial -> True once test files have been pushed,
  # so multiple TestRunner instances skip redundant pushes to the same device.
  _DEVICE_HAS_TEST_FILES = {}

  def __init__(self, options, device, tests_iter, coverage, shard_index, apks,
               ports_to_forward):
    """Create a new TestRunner.

    Args:
      options: An options object with the following required attributes:
      -  build_type: 'Release' or 'Debug'.
      -  install_apk: Re-installs the apk if opted.
      -  save_perf_json: Whether or not to save the JSON file from UI perf
         tests.
      -  screenshot_failures: Take a screenshot for a test failure
      -  tool: Name of the Valgrind tool.
      -  wait_for_debugger: blocks until the debugger is connected.
      device: Attached android device.
      tests_iter: A list of tests to be run.
      coverage: Collects coverage information if opted.
      shard_index: shard # for this TestRunner, used to create unique port
          numbers.
      apks: A list of ApkInfo objects need to be installed. The first element
            should be the tests apk, the rests could be the apks used in test.
            The default is ChromeTest.apk.
      ports_to_forward: A list of port numbers for which to set up forwarders.
                        Can be optionally requested by a test case.
    Raises:
      FatalTestException: if coverage metadata is not available.
    """
    BaseTestRunner.__init__(
        self, device, options.tool, shard_index, options.build_type)

    if not apks:
      apks = [apk_info.ApkInfo(options.test_apk_path,
                               options.test_apk_jar_path)]

    self.build_type = options.build_type
    self.install_apk = options.install_apk
    self.save_perf_json = options.save_perf_json
    self.screenshot_failures = options.screenshot_failures
    self.wait_for_debugger = options.wait_for_debugger

    self.tests_iter = tests_iter
    self.coverage = coverage
    self.apks = apks
    self.test_apk = apks[0]
    self.instrumentation_class_path = self.test_apk.GetPackageName()
    self.ports_to_forward = ports_to_forward

    self.test_results = TestResults()
    self.forwarder = None

    if self.coverage:
      # Start each coverage run from a clean slate, and fail fast if the
      # Emma metadata or the web root for publishing reports is missing.
      if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
        os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
      if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Coverage meta info [' +
                                 TestRunner._COVERAGE_META_INFO_PATH +
                                 '] does not exist.')
      if (not TestRunner._COVERAGE_WEB_ROOT_DIR or
          not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
        raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                 ' : Path specified in $EMMA_WEB_ROOTDIR [' +
                                 TestRunner._COVERAGE_WEB_ROOT_DIR +
                                 '] does not exist.')

  def _GetTestsIter(self):
    """Returns the container of tests to run, fetching it lazily if needed."""
    if not self.tests_iter:
      # multiprocessing.Queue can't be pickled across processes if we have it as
      # a member set during constructor. Grab one here instead.
      self.tests_iter = (BaseTestSharder.tests_container)
    assert self.tests_iter
    return self.tests_iter

  def CopyTestFilesOnce(self):
    """Pushes the test data files to the device. Installs the apk if opted."""
    if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
      logging.warning('Already copied test files to device %s, skipping.',
                      self.device)
      return
    # (host source path, destination subdirectory under _DEVICE_DATA_DIR).
    host_test_files = [
        ('android_webview/test/data/device_files', 'webview'),
        ('content/test/data/android/device_files', 'content'),
        ('chrome/test/data/android/device_files', 'chrome')
    ]
    for (host_src, dst_layer) in host_test_files:
      host_test_files_path = constants.CHROME_DIR + '/' + host_src
      if os.path.exists(host_test_files_path):
        self.adb.PushIfNeeded(host_test_files_path,
                              self.adb.GetExternalStorage() + '/' +
                              TestRunner._DEVICE_DATA_DIR + '/' + dst_layer)
    if self.install_apk:
      for apk in self.apks:
        self.adb.ManagedInstall(apk.GetApkPath(),
                                package_name=apk.GetPackageName())
    self.tool.CopyFiles()
    TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True
  def SaveCoverageData(self, test):
    """Saves the Emma coverage data before it's overwritten by the next test.

    Args:
      test: the test whose coverage data is collected.
    """
    if not self.coverage:
      return
    if not self.adb.Adb().Pull(TestRunner._COVERAGE_RESULT_PATH,
                               constants.CHROME_DIR):
      logging.error('ERROR: Unable to find file ' +
                    TestRunner._COVERAGE_RESULT_PATH +
                    ' on the device for test ' + test)
    pulled_coverage_file = os.path.join(constants.CHROME_DIR,
                                        TestRunner._COVERAGE_FILENAME)
    if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
      # Merge this test's coverage into the accumulated file via Emma.
      cmd = ['java', '-classpath', TestRunner._EMMA_JAR, 'emma', 'merge',
             '-in', pulled_coverage_file,
             '-in', TestRunner._COVERAGE_MERGED_FILENAME,
             '-out', TestRunner._COVERAGE_MERGED_FILENAME]
      cmd_helper.RunCmd(cmd)
    else:
      # First test: the pulled file becomes the merged baseline.
      shutil.copy(pulled_coverage_file,
                  TestRunner._COVERAGE_MERGED_FILENAME)
    os.remove(pulled_coverage_file)

  def GenerateCoverageReportIfNeeded(self):
    """Uses Emma to generate a coverage report and an HTML page."""
    if not self.coverage:
      return
    cmd = ['java', '-classpath', TestRunner._EMMA_JAR,
           'emma', 'report', '-r', 'html',
           '-in', TestRunner._COVERAGE_MERGED_FILENAME,
           '-in', TestRunner._COVERAGE_META_INFO_PATH]
    cmd_helper.RunCmd(cmd)
    # Publish this run under a timestamped directory in the web root.
    new_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
                           time.strftime('Coverage_for_%Y_%m_%d_%a_%H:%M'))
    shutil.copytree('coverage', new_dir)

    latest_dir = os.path.join(TestRunner._COVERAGE_WEB_ROOT_DIR,
                              'Latest_Coverage_Run')
    if os.path.exists(latest_dir):
      shutil.rmtree(latest_dir)
    os.mkdir(latest_dir)
    webserver_new_index = os.path.join(new_dir, 'index.html')
    webserver_new_files = os.path.join(new_dir, '_files')
    webserver_latest_index = os.path.join(latest_dir, 'index.html')
    webserver_latest_files = os.path.join(latest_dir, '_files')
    # Setup new softlinks to last result.
    os.symlink(webserver_new_index, webserver_latest_index)
    os.symlink(webserver_new_files, webserver_latest_files)
    cmd_helper.RunCmd(['chmod', '755', '-R', latest_dir, new_dir])

  def _GetInstrumentationArgs(self):
    """Returns the extra instrumentation args ('coverage'/'debug') to pass."""
    ret = {}
    if self.coverage:
      ret['coverage'] = 'true'
    if self.wait_for_debugger:
      ret['debug'] = 'true'
    return ret

  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_tool = os.path.join(constants.CHROME_DIR,
        'third_party/android_tools/sdk/tools/monkeyrunner')
    screenshot_script = os.path.join(constants.CHROME_DIR,
        'build/android/monkeyrunner_screenshot.py')
    screenshot_path = os.path.join(constants.CHROME_DIR,
                                   'out_screenshots')
    if not os.path.exists(screenshot_path):
      os.mkdir(screenshot_path)
    screenshot_name = os.path.join(screenshot_path, test + '.png')
    logging.info('Taking screenshot named %s', screenshot_name)
    cmd_helper.RunCmd([screenshot_tool, screenshot_script,
                       '--serial', self.device,
                       '--file', screenshot_name])

  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.adb.IsRootEnabled():
      logging.warning('Unable to enable java asserts for %s, non rooted device',
                      self.device)
    else:
      if self.adb.SetJavaAssertsEnabled(enable=True):
        self.adb.Reboot(full_reboot=False)

    # We give different default value to launch HTTP server based on shard index
    # because it may have race condition when multiple processes are trying to
    # launch lighttpd with same port at same time.
    http_server_ports = self.LaunchTestHttpServer(
        os.path.join(constants.CHROME_DIR),
        (constants.LIGHTTPD_RANDOM_PORT_FIRST + self.shard_index))
    if self.ports_to_forward:
      port_pairs = [(port, port) for port in self.ports_to_forward]
      # We need to remember which ports the HTTP server is using, since the
      # forwarder will stomp on them otherwise.
      port_pairs.append(http_server_ports)
      self.forwarder = Forwarder(
          self.adb, port_pairs, self.tool, '127.0.0.1', self.build_type)
    self.CopyTestFilesOnce()
    self.flags.AddFlags(['--enable-test-intents'])

  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from test run."""
    if self.forwarder:
      self.forwarder.Close()
    self.GenerateCoverageReportIfNeeded()
    super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    # Make sure the forwarder is still running.
    self.RestartHttpServerForwarderIfNecessary()

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_apk.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    # Clear any stale profile output, then start watching logcat for the
    # perf annotation emitted by the test (consumed in TearDownPerfMonitoring).
    self.adb.Adb().SendCommand('shell rm ' +
                               TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
    self.adb.StartMonitoringLogcat()

  def TestTeardown(self, test, test_result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner this might handle coverage
    tracking or performance tracking. This method will only be called if the
    test passed.

    Args:
      test: The name of the test that was just run.
      test_result: result for this test.
    """

    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not test_result or test_result.GetStatusCode():
      return

    self.TearDownPerfMonitoring(test)
    self.SaveCoverageData(test)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      FatalTestException: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait and grab annotation data so we can figure out which traces to parse
    # NOTE(review): pattern is not a raw string; the escapes happen to survive
    # ('\*' etc. are not recognized string escapes) but r'...' would be safer.
    regex = self.adb.WaitForLogMatch(re.compile('\*\*PERFANNOTATION\(' +
                                                raw_test_name +
                                                '\)\:(.*)'), None)

    # If the test is set to run on a specific device type only (IE: only
    # tablet or phone) and it is being run on the wrong device, the test
    # just quits and does not do anything. The java test harness will still
    # print the appropriate annotation for us, but will add --NORUN-- for
    # us so we know to ignore the results.
    # The --NORUN-- tag is managed by MainActivityTestBase.java
    if regex.group(1) != '--NORUN--':

      # Obtain the relevant perf data. The data is dumped to a
      # JSON formatted file.
      json_string = self.adb.GetFileContents(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')

      if json_string:
        # GetFileContents appears to return the file as a list of lines;
        # rejoin into a single JSON string.
        json_string = '\n'.join(json_string)
      else:
        raise FatalTestException('Perf file does not exist or is empty')

      if self.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test ' +
                     test + ' to ' + json_local_file)

      # Annotation payload is ';'-separated sets of 'trace,chart,trace-name'.
      raw_perf_data = regex.group(1).split(';')

      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise FatalTestException('Unexpected number of tokens in '
                                     'perf annotation string: ' + raw_perf_set)

          # Process the performance data
          result = GetAverageRunInfoFromJSONString(json_string, perf_set[0])

          PrintPerfResult(perf_set[1], perf_set[2],
                          [result['average']], result['units'])

  def _SetupIndividualTestTimeoutScale(self, test):
    """Applies the per-test timeout scale to Chrome on the device."""
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.apks[0].GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      for annotation in annotations:
        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
        if scale_match:
          timeout_scale = int(scale_match.group(1))
    if self.wait_for_debugger:
      # Give a human at the debugger plenty of headroom.
      timeout_scale *= 100
    return timeout_scale

  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.apks[0].GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 600 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    # Default (SmallTest / unannotated).
    return 1 * 60

  def RunTests(self):
    """Runs the tests, generating the coverage if needed.

    Returns:
      A TestResults object.
    """
    instrumentation_path = (self.instrumentation_class_path +
                            '/android.test.InstrumentationTestRunner')
    instrumentation_args = self._GetInstrumentationArgs()
    for test in self._GetTestsIter():
      test_result = None
      start_date_ms = None
      try:
        self.TestSetup(test)
        # NOTE(review): int(time.time()) * 1000 truncates to whole seconds
        # before scaling, so timestamps/durations lose sub-second precision;
        # int(time.time() * 1000) would keep milliseconds — confirm intent.
        start_date_ms = int(time.time()) * 1000
        args_with_filter = dict(instrumentation_args)
        args_with_filter['class'] = test
        # |test_results| is a list that should contain
        # a single TestResult object.
        logging.warn(args_with_filter)
        (test_results, _) = self.adb.Adb().StartInstrumentation(
            instrumentation_path=instrumentation_path,
            instrumentation_args=args_with_filter,
            timeout_time=(self._GetIndividualTestTimeoutSecs(test) *
                          self._GetIndividualTestTimeoutScale(test) *
                          self.tool.GetTimeoutScale()))
        duration_ms = int(time.time()) * 1000 - start_date_ms
        assert len(test_results) == 1
        test_result = test_results[0]
        status_code = test_result.GetStatusCode()
        if status_code:
          log = test_result.GetFailureReason()
          if not log:
            log = 'No information.'
          if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0:
            self._TakeScreenshot(test)
          self.test_results.failed += [SingleTestResult(test, start_date_ms,
                                                        duration_ms, log)]
        else:
          result = [SingleTestResult(test, start_date_ms, duration_ms)]
          self.test_results.ok += result
      # Catch exceptions thrown by StartInstrumentation().
      # See ../../third_party/android/testrunner/adb_interface.py
      except (errors.WaitForResponseTimedOutError,
              errors.DeviceUnresponsiveError,
              errors.InstrumentationError), e:
        if start_date_ms:
          duration_ms = int(time.time()) * 1000 - start_date_ms
        else:
          start_date_ms = int(time.time()) * 1000
          duration_ms = 0
        message = str(e)
        if not message:
          message = 'No information.'
        self.test_results.crashed += [SingleTestResult(test, start_date_ms,
                                                       duration_ms,
                                                       message)]
        test_result = None
      self.TestTeardown(test, test_result)
    return self.test_results


class TestSharder(BaseTestSharder):
  """Responsible for sharding the tests on the connected devices."""

  def __init__(self, attached_devices, options, tests, apks):
    """Creates a sharder over |attached_devices| for |tests| using |apks|."""
    BaseTestSharder.__init__(self, attached_devices)
    self.options = options
    self.tests = tests
    self.apks = apks

  def SetupSharding(self, tests):
    """Called before starting the shards."""
    SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
        len(self.attached_devices), tests))

  def CreateShardedTestRunner(self, device, index):
    """Creates a sharded test runner.

    Args:
      device: Device serial where this shard will run.
      index: Index of this device in the pool.

    Returns:
      A TestRunner object.
    """
    # tests_iter=None: the runner pulls tests from the shared sharded queue;
    # coverage=False: coverage is never collected on sharded runs.
    return TestRunner(self.options, device, None, False, index, self.apks, [])


def DispatchJavaTests(options, apks):
  """Dispatches Java tests onto connected device(s).

  If possible, this method will attempt to shard the tests to
  all connected devices. Otherwise, dispatch and run tests on one device.

  Args:
    options: Command line options.
    apks: list of APKs to use.

  Returns:
    A TestResults object holding the results of the Java tests.

  Raises:
    FatalTestException: when there are no attached devices.
  """
  test_apk = apks[0]
  if options.annotation:
    available_tests = test_apk.GetAnnotatedTests(options.annotation)
    if len(options.annotation) == 1 and options.annotation[0] == 'SmallTest':
      # Unannotated Java-driven tests are implicitly treated as SmallTest.
      tests_without_annotation = [
          m for m in
          test_apk.GetTestMethods()
          if not test_apk.GetTestAnnotations(m) and
          not apk_info.ApkInfo.IsPythonDrivenTest(m)]
      if tests_without_annotation:
        tests_without_annotation.sort()
        logging.warning('The following tests do not contain any annotation. '
                        'Assuming "SmallTest":\n%s',
                        '\n'.join(tests_without_annotation))
        available_tests += tests_without_annotation
  else:
    available_tests = [m for m in test_apk.GetTestMethods()
                       if not apk_info.ApkInfo.IsPythonDrivenTest(m)]
  coverage = os.environ.get('EMMA_INSTRUMENT') == 'true'

  tests = []
  if options.test_filter:
    # |available_tests| are in adb instrument format: package.path.class#test.
    filter_without_hash = options.test_filter.replace('#', '.')
    tests = [t for t in available_tests
             if filter_without_hash in t.replace('#', '.')]
  else:
    tests = available_tests

  if not tests:
    logging.warning('No Java tests to run with current args.')
    return TestResults()

  # Repeat the full test list |number_of_runs| times.
  tests *= options.number_of_runs

  attached_devices = android_commands.GetAttachedDevices()
  test_results = TestResults()

  if not attached_devices:
    raise FatalTestException('You have no devices attached or visible!')
  if options.device:
    attached_devices = [options.device]

  logging.info('Will run: %s', str(tests))

  if len(attached_devices) > 1 and (coverage or options.wait_for_debugger):
    logging.warning('Coverage / debugger can not be sharded, '
                    'using first available device')
    attached_devices = attached_devices[:1]
  sharder = TestSharder(attached_devices, options, tests, apks)
  test_results = sharder.RunShardedTests()
  return test_results