# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import glob
import logging
import os
import sys

from base_test_runner import BaseTestRunner
import debug_info
import constants
import perf_tests_helper
import run_tests_helper
from test_package_apk import TestPackageApk
from test_package_executable import TestPackageExecutable
from test_result import TestResults


class SingleTestRunner(BaseTestRunner):
  """Single test suite attached to a single device.

  Args:
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    timeout: Timeout for each test.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    performance_test: Whether or not this is a performance test.
    cleanup_test_files: Whether or not to cleanup test files on device.
    tool_name: Name of the Valgrind tool.
    shard_index: Index number of the shard on which the test suite will run.
    dump_debug_info: Whether or not to dump debug information.
    fast_and_loose: Whether to skip pushing test data to the device.
    build_type: 'Release' or 'Debug'.
  """

  def __init__(self, device, test_suite, gtest_filter, test_arguments, timeout,
               rebaseline, performance_test, cleanup_test_files, tool_name,
               shard_index, dump_debug_info, fast_and_loose, build_type):
    BaseTestRunner.__init__(self, device, tool_name, shard_index, build_type)
    self._running_on_emulator = self.device.startswith('emulator')
    self._gtest_filter = gtest_filter
    self._test_arguments = test_arguments
    self.test_results = TestResults()
    if dump_debug_info:
      self.dump_debug_info = debug_info.GTestDebugInfo(self.adb, device,
          os.path.basename(test_suite), gtest_filter)
    else:
      self.dump_debug_info = None
    self.fast_and_loose = fast_and_loose

    logging.warning('Test suite: ' + test_suite)
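    # The suite's file type determines how it is packaged and run: an .apk
    # suite is driven through the APK-based test package, anything else is
    # treated as a native executable pushed to the device.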
    if os.path.splitext(test_suite)[1] == '.apk':
      self.test_package = TestPackageApk(self.adb, device,
          test_suite, timeout, rebaseline, performance_test, cleanup_test_files,
          self.tool, self.dump_debug_info)
    else:
      self.test_package = TestPackageExecutable(
          self.adb, device,
          test_suite, timeout, rebaseline, performance_test, cleanup_test_files,
          self.tool, self.dump_debug_info)
    self._performance_test_setup = None
    if performance_test:
      self._performance_test_setup = perf_tests_helper.PerfTestSetup(self.adb)

  def _TestSuiteRequiresMockTestServer(self):
    """Returns True if the test suite requires a mock test server."""
    return False
    # TODO(yfriedman): Disabled because of flakiness.
    # (self.test_package.test_suite_basename == 'unit_tests' or
    #  self.test_package.test_suite_basename == 'net_unittests' or
    #  False)

  def _GetFilterFileName(self):
    """Returns the filename of the gtest filter file."""
    return os.path.join(sys.path[0], 'gtest_filter',
                        self.test_package.test_suite_basename + '_disabled')

  def _GetAdditionalEmulatorFilterName(self):
    """Returns the filename of the emulator's additional gtest filter."""
    return os.path.join(sys.path[0], 'gtest_filter',
                        self.test_package.test_suite_basename +
                        '_emulator_additional_disabled')

  def GetDisabledTests(self):
    """Returns a list of disabled tests.

    Returns:
      A list of disabled tests obtained from gtest_filter/test_suite_disabled.
    """
    disabled_tests = run_tests_helper.GetExpectations(self._GetFilterFileName())
    if self._running_on_emulator:
      # Append emulator's filter file.
      disabled_tests.extend(run_tests_helper.GetExpectations(
          self._GetAdditionalEmulatorFilterName()))
    return disabled_tests

  def UpdateFilter(self, failed_tests):
    """Updates test_suite_disabled file with the new filter (deletes if empty).

    If running on an emulator, only the failed tests that are not already in
    the normal filter returned by _GetFilterFileName() are written to the
    emulator's additional filter file.

    Args:
      failed_tests: A sorted list of failed tests.
    """
    disabled_tests = []
    if not self._running_on_emulator:
      filter_file_name = self._GetFilterFileName()
    else:
      filter_file_name = self._GetAdditionalEmulatorFilterName()
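      # Also load the main filter so that tests it already disables are not
      # duplicated in the emulator-only filter.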
      disabled_tests.extend(
          run_tests_helper.GetExpectations(self._GetFilterFileName()))
      logging.info('About to update emulator\'s additional filter (%s).'
                   % filter_file_name)

    new_failed_tests = []
    if failed_tests:
      for test in failed_tests:
        if test.name not in disabled_tests:
          new_failed_tests.append(test.name)

    if not new_failed_tests:
      if os.path.exists(filter_file_name):
        os.unlink(filter_file_name)
      return

    filter_file = open(filter_file_name, 'w')
    if self._running_on_emulator:
      filter_file.write('# Additional list of suppressions from emulator\n')
    else:
      filter_file.write('# List of suppressions\n')
    filter_file.write('# This file was automatically generated by %s\n'
                      % sys.argv[0])
    filter_file.write('\n'.join(sorted(new_failed_tests)))
    filter_file.write('\n')
    filter_file.close()

  def GetDataFilesForTestSuite(self):
    """Returns a list of data files/dirs needed by the test suite."""
    # Ideally, we'd just push all test data. However, it has >100MB, and a lot
    # of the files are not relevant (some are used for browser_tests, others
    # for features not supported, etc.).
    if self.test_package.test_suite_basename in ['base_unittests',
                                                 'sql_unittests',
                                                 'unit_tests']:
      test_files = [
          'base/data/file_util_unittest',
          'base/data/json/bom_feff.json',
          'chrome/test/data/download-test1.lib',
          'chrome/test/data/extensions/bad_magic.crx',
          'chrome/test/data/extensions/good.crx',
          'chrome/test/data/extensions/icon1.png',
          'chrome/test/data/extensions/icon2.png',
          'chrome/test/data/extensions/icon3.png',
          'chrome/test/data/extensions/allow_silent_upgrade/',
          'chrome/test/data/extensions/app/',
          'chrome/test/data/extensions/bad/',
          'chrome/test/data/extensions/effective_host_permissions/',
          'chrome/test/data/extensions/empty_manifest/',
          'chrome/test/data/extensions/good/Extensions/',
          'chrome/test/data/extensions/manifest_tests/',
          'chrome/test/data/extensions/page_action/',
          'chrome/test/data/extensions/permissions/',
          'chrome/test/data/extensions/script_and_capture/',
          'chrome/test/data/extensions/unpacker/',
          'chrome/test/data/bookmarks/',
          'chrome/test/data/components/',
          'chrome/test/data/extensions/json_schema_test.js',
          'chrome/test/data/History/',
          'chrome/test/data/json_schema_validator/',
          'chrome/test/data/pref_service/',
          'chrome/test/data/serializer_nested_test.js',
          'chrome/test/data/serializer_test.js',
          'chrome/test/data/serializer_test_nowhitespace.js',
          'chrome/test/data/top_sites/',
          'chrome/test/data/web_app_info/',
          'chrome/test/data/web_database',
          'chrome/test/data/webui/',
          'chrome/test/data/zip',
          'chrome/third_party/mock4js/',
          'content/browser/gpu/software_rendering_list.json',
          'net/data/cache_tests/insert_load1',
          'net/data/cache_tests/dirty_entry5',
          'net/data/ssl/certificates/',
          'ui/base/test/data/data_pack_unittest',
      ]
      if self.test_package.test_suite_basename == 'unit_tests':
        test_files += ['chrome/test/data/simple_open_search.xml']
        # The following are spell check data. For now, only list the data under
        # third_party/hunspell_dictionaries that are used by unit tests.
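        # Glob relative to constants.CHROME_DIR so the returned dictionary
        # paths match the relative style of the other entries.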
        old_cwd = os.getcwd()
        os.chdir(constants.CHROME_DIR)
        test_files += glob.glob('third_party/hunspell_dictionaries/*.bdic')
        os.chdir(old_cwd)
      return test_files
    elif self.test_package.test_suite_basename == 'net_unittests':
      return [
          'net/data/cache_tests',
          'net/data/filter_unittests',
          'net/data/ftp',
          'net/data/proxy_resolver_v8_unittest',
          'net/data/ssl/certificates',
          'net/data/url_request_unittest/',
          'net/data/proxy_script_fetcher_unittest'
      ]
    elif self.test_package.test_suite_basename == 'ui_tests':
      return [
          'chrome/test/data/dromaeo',
          'chrome/test/data/json2.js',
          'chrome/test/data/sunspider',
          'chrome/test/data/v8_benchmark',
          'chrome/test/perf/sunspider_uitest.js',
          'chrome/test/perf/v8_benchmark_uitest.js',
      ]
    elif self.test_package.test_suite_basename == 'page_cycler_tests':
      data = [
          'tools/page_cycler',
          'data/page_cycler',
      ]
      for d in data:
        if not os.path.exists(d):
          raise Exception('Page cycler data not found.')
      return data
    elif self.test_package.test_suite_basename == 'webkit_unit_tests':
      return [
          'third_party/WebKit/Source/WebKit/chromium/tests/data',
      ]
    elif self.test_package.test_suite_basename == 'content_unittests':
      return [
          'content/test/data/gpu/webgl_conformance_test_expectations.txt',
          'net/data/ssl/certificates/',
          'webkit/data/dom_storage/webcore_test_database.localstorage',
          'third_party/hyphen/hyph_en_US.dic',
      ]
    elif self.test_package.test_suite_basename == 'media_unittests':
      return [
          'media/test/data',
      ]
    return []

  def LaunchHelperToolsForTestSuite(self):
    """Launches helper tools for the test suite.

    Sometimes one test may need to run some helper tools first in order to
    successfully complete the test.
    """
    if self._TestSuiteRequiresMockTestServer():
      self.LaunchChromeTestServerSpawner()

  def StripAndCopyFiles(self):
    """Strips and copies the required data files for the test suite."""
    self.test_package.StripAndCopyExecutable()
    self.test_package.PushDataAndPakFiles()
    self.tool.CopyFiles()
    test_data = self.GetDataFilesForTestSuite()
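    # Unless fast_and_loose is set, copy the suite's test data to the device's
    # external storage (the SD card).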
    if test_data and not self.fast_and_loose:
      # Make sure SD card is ready.
      self.adb.WaitForSdCardReady(20)
      for data in test_data:
        self.CopyTestData([data], self.adb.GetExternalStorage())

  def RunTestsWithFilter(self):
    """Runs the tests via a small, temporary shell script."""
    self.test_package.CreateTestRunnerScript(self._gtest_filter,
                                             self._test_arguments)
    self.test_results = self.test_package.RunTestsAndListResults()

  def RebaselineTests(self):
    """Runs all available tests, restarting in case of failures."""
    if self._gtest_filter:
      all_tests = set(self._gtest_filter.split(':'))
    else:
      all_tests = set(self.test_package.GetAllTests())
    failed_results = set()
    executed_results = set()
    while True:
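      # Each pass runs only the tests that have not been executed yet, so a
      # crash mid-run narrows the filter and restarts with the remaining tests.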
      executed_names = set([f.name for f in executed_results])
      self._gtest_filter = ':'.join(all_tests - executed_names)
      self.RunTestsWithFilter()
      failed_results.update(self.test_results.crashed,
                            self.test_results.failed)
      executed_results.update(self.test_results.crashed,
                              self.test_results.failed,
                              self.test_results.ok)
      executed_names = set([f.name for f in executed_results])
      logging.info('*' * 80)
      logging.info(self.device)
      logging.info('Executed: ' + str(len(executed_names)) + ' of ' +
                   str(len(all_tests)))
      logging.info('Failed so far: ' + str(len(failed_results)) + ' ' +
                   str([f.name for f in failed_results]))
      logging.info('Remaining: ' + str(len(all_tests - executed_names)) + ' ' +
                   str(all_tests - executed_names))
      logging.info('*' * 80)
      if executed_names == all_tests:
        break
    self.test_results = TestResults.FromRun(
        ok=list(executed_results - failed_results),
        failed=list(failed_results))

  def RunTests(self):
    """Runs all tests (in rebaseline mode, runs each test in isolation).

    Returns:
      A TestResults object.
    """
    if self.test_package.rebaseline:
      self.RebaselineTests()
    else:
      if not self._gtest_filter:
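        # Build a negative gtest filter from the per-suite disabled lists and
        # the disabled test-name prefixes.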
        self._gtest_filter = ('-' + ':'.join(self.GetDisabledTests()) + ':' +
                              ':'.join(['*.' + x + '*' for x in
                                        self.test_package.GetDisabledPrefixes()]))
      self.RunTestsWithFilter()
    return self.test_results

  def SetUp(self):
    """Sets up the necessary test environment for the test suite."""
    super(SingleTestRunner, self).SetUp()
    self.adb.ClearApplicationState(constants.CHROME_PACKAGE)
    if self._performance_test_setup:
      self._performance_test_setup.SetUp()
    if self.dump_debug_info:
      self.dump_debug_info.StartRecordingLog(True)
    self.StripAndCopyFiles()
    self.LaunchHelperToolsForTestSuite()
    self.tool.SetupEnvironment()

  def TearDown(self):
    """Cleans up the test environment for the test suite."""
    self.tool.CleanUpEnvironment()
    if self.test_package.cleanup_test_files:
      self.adb.RemovePushedFiles()
    if self.dump_debug_info:
      self.dump_debug_info.StopRecordingLog()
    if self._performance_test_setup:
      self._performance_test_setup.TearDown()
    if self.dump_debug_info:
      self.dump_debug_info.ArchiveNewCrashFiles()
    super(SingleTestRunner, self).TearDown()