Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import logging
import re
import os

import constants
from perf_tests_helper import PrintPerfResult
from pylib import pexpect
from test_result import BaseTestResult, TestResults


# TODO(bulach): TestPackage, TestPackageExecutable and
# TestPackageApk are a work in progress related to making the native tests
# run as an NDK app from an APK rather than a stand-alone executable.
class TestPackage(object):
  """A helper base class for both APK and stand-alone executables.

  Args:
    adb: ADB interface the tests are using.
    device: Device to run the tests on.
    test_suite: A specific test suite to run, empty to run all.
    timeout: Timeout for each test.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    performance_test: Whether or not this is a performance test.
    cleanup_test_files: Whether or not to clean up test files on the device.
    tool: Name of the Valgrind tool.
    dump_debug_info: A debug_info object.
  """
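  # Hypothetical construction sketch: per the TODO above, concrete subclasses
  # (e.g. TestPackageApk) are what actually get instantiated, since
  # _GetTestSuiteBaseName() is supplied by them. The argument values below
  # are placeholders, not real objects defined in this module:
  #
  #   package = TestPackageApk(adb_wrapper, 'emulator-5554',
  #                            'out/Release/base_unittests', timeout=0,
  #                            rebaseline=False, performance_test=False,
  #                            cleanup_test_files=True, tool=valgrind_tool,
  #                            dump_debug_info=None)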

  def __init__(self, adb, device, test_suite, timeout, rebaseline,
               performance_test, cleanup_test_files, tool, dump_debug_info):
    self.adb = adb
    self.device = device
    self.test_suite_full = test_suite
    self.test_suite = os.path.splitext(test_suite)[0]
    self.test_suite_basename = self._GetTestSuiteBaseName()
    self.test_suite_dirname = os.path.dirname(
        self.test_suite.split(self.test_suite_basename)[0])
    self.rebaseline = rebaseline
    self.performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = tool
    if timeout == 0:
      timeout = 60
    # On a VM (e.g. chromium buildbots), this timeout is way too small.
    if os.environ.get('BUILDBOT_SLAVENAME'):
      timeout = timeout * 2
    self.timeout = timeout * self.tool.GetTimeoutScale()
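    # Worked example of the computation above (illustrative values):
    # timeout=0 becomes the 60s default; on a buildbot it doubles to 120s;
    # a tool whose GetTimeoutScale() returns 2 yields 120 * 2 = 240s per test.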
    self.dump_debug_info = dump_debug_info

  def _BeginGetIOStats(self):
    """Gets I/O statistics before running the test.

    Returns:
      An I/O stats object, or None if this is not a performance test.
    """
    initial_io_stats = None
    # Try to get the disk I/O statistics for all performance tests.
    if self.performance_test and not self.rebaseline:
      initial_io_stats = self.adb.GetIoStats()
    return initial_io_stats

  def _EndGetIOStats(self, initial_io_stats):
    """Gets I/O statistics after running the test and calculates the delta.

    Args:
      initial_io_stats: I/O stats object returned by _BeginGetIOStats.

    Returns:
      A string of formatted disk I/O statistics.
    """
    disk_io = ''
    if self.performance_test and initial_io_stats:
      final_io_stats = self.adb.GetIoStats()
      for stat in final_io_stats:
        disk_io += '\n' + PrintPerfResult(stat, stat,
                                          [final_io_stats[stat] -
                                           initial_io_stats[stat]],
                                          stat.split('_')[1],
                                          print_to_stdout=False)
      logging.info(disk_io)
    return disk_io
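  # Sketch of the assumed GetIoStats() shape (not verified against the adb
  # wrapper): a flat dict such as {'num_reads': 120, 'num_writes': 45}, so
  # the loop above reports the before/after delta per key and uses the text
  # after the first '_' (e.g. 'reads') as the unit label.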

  def GetDisabledPrefixes(self):
    return ['DISABLED_', 'FLAKY_', 'FAILS_']

  def _ParseGTestListTests(self, all_tests):
    """Parses the raw output of --gtest_list_tests into full test names."""
    ret = []
    current = ''
    disabled_prefixes = self.GetDisabledPrefixes()
    for test in all_tests:
      if not test:
        continue
      if test[0] != ' ' and not test.endswith('.'):
        # Ignore any lines with an unexpected format.
        continue
      if test[0] != ' ' and test.endswith('.'):
        current = test
        continue
      if 'YOU HAVE' in test:
        # gtest's trailing 'YOU HAVE n DISABLED TESTS' banner; stop here.
        break
      test_name = test[2:]
      if not any(test_name.startswith(x) for x in disabled_prefixes):
        ret += [current + test_name]
    return ret
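  # Example of the input this parser expects (--gtest_list_tests output;
  # the test names are made up for illustration):
  #
  #   FooTest.
  #     Bar
  #     DISABLED_Baz
  #   QuxTest.
  #     Quux
  #
  # yields ['FooTest.Bar', 'QuxTest.Quux']; DISABLED_/FLAKY_/FAILS_ tests
  # are filtered out.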

  def PushDataAndPakFiles(self):
    external_storage = self.adb.GetExternalStorage()
    if (self.test_suite_basename == 'ui_unittests' or
        self.test_suite_basename == 'unit_tests'):
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/chrome.pak',
          external_storage + '/paks/chrome.pak')
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/locales/en-US.pak',
          external_storage + '/paks/en-US.pak')
    if self.test_suite_basename == 'unit_tests':
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/resources.pak',
          external_storage + '/paks/resources.pak')
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/chrome_100_percent.pak',
          external_storage + '/paks/chrome_100_percent.pak')
      self.adb.PushIfNeeded(self.test_suite_dirname + '/test_data',
                            external_storage + '/test_data')
    if self.test_suite_basename == 'content_unittests':
      self.adb.PushIfNeeded(
          self.test_suite_dirname + '/content_resources.pak',
          external_storage + '/paks/content_resources.pak')
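  # Resulting device layout, assuming GetExternalStorage() resolves to the
  # usual /sdcard mount (this is device-dependent):
  #
  #   /sdcard/paks/chrome.pak, /sdcard/paks/en-US.pak, ...
  #   /sdcard/test_data/...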

  def _WatchTestOutput(self, p):
    """Watches the test output.

    Args:
      p: the process generating output as created by pexpect.spawn.
    """
    ok_tests = []
    failed_tests = []
    crashed_tests = []
    timed_out = False
    overall_fail = False
    # gtest pads its status banners with spaces, so match any whitespace run
    # inside the brackets (see the example banners below).
    re_run = re.compile(r'\[\s+RUN\s+\] ?(.*)\r\n')
    # APK tests rely on the PASSED tag.
    re_passed = re.compile(r'\[\s+PASSED\s+\] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile(r'\[\s+CRASHED\s+\](.*)\r\n')
    re_fail = re.compile(r'\[\s+FAILED\s+\] ?(.*)\r\n')
    re_runner_fail = re.compile(r'\[\s+RUNNER_FAILED\s+\] ?(.*)\r\n')
    re_ok = re.compile(r'\[\s+OK\s+\] ?(.*?) .*\r\n')
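    # Typical gtest banners these patterns target:
    #   [ RUN      ] FooTest.Bar
    #   [       OK ] FooTest.Bar (12 ms)
    #   [  FAILED  ] FooTest.Bar (34 ms)
    #   [  PASSED  ] 5 tests.
    # RUNNER_FAILED and CRASHED are emitted by the Chromium test runner
    # itself rather than by gtest.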
    io_stats_before = self._BeginGetIOStats()
    try:
      while True:
        found = p.expect([re_run, re_passed, re_runner_fail],
                         timeout=self.timeout)
        if found == 1:  # Matched PASSED.
          break
        if found == 2:  # Matched RUNNER_FAILED.
          logging.error('RUNNER_FAILED')
          overall_fail = True
          break
        if self.dump_debug_info:
          self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
        full_test_name = p.match.group(1).replace('\r', '')
        found = p.expect([re_ok, re_fail, re_crash], timeout=self.timeout)
        if found == 0:  # Matched OK.
          if full_test_name == p.match.group(1).replace('\r', ''):
            ok_tests += [BaseTestResult(full_test_name, p.before)]
          continue
        if found == 2:  # Matched CRASHED.
          crashed_tests += [BaseTestResult(full_test_name, p.before)]
          overall_fail = True
          break
        # The test failed.
        failed_tests += [BaseTestResult(full_test_name, p.before)]
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self.timeout)
      timed_out = True
    finally:
      p.close()
    if not self.rebaseline:
      # _EndGetIOStats returns a formatted string; wrap it in a single
      # BaseTestResult (the 'disk_io_stats' label is arbitrary) instead of
      # extending the list with the string's individual characters.
      ok_tests += [BaseTestResult('disk_io_stats',
                                  self._EndGetIOStats(io_stats_before))]
      ret_code = self._GetGTestReturnCode()
      if ret_code:
        failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code,
                                        'pexpect.before: %s'
                                        '\npexpect.after: %s'
                                        % (p.before, p.after))]
    # Create TestResults and return.
    return TestResults.FromRun(ok=ok_tests, failed=failed_tests,
                               crashed=crashed_tests, timed_out=timed_out,
                               overall_fail=overall_fail)
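

# Hypothetical driver sketch: how a subclass instance might feed a spawned
# test process into _WatchTestOutput. The shell invocation and the result
# attributes used here are illustrative, not APIs defined in this file:
#
#   p = pexpect.spawn('adb', ['-s', package.device, 'shell',
#                             package.test_suite_full])
#   results = package._WatchTestOutput(p)
#   if results.failed or results.crashed:
#     logging.error('Suite had failures.')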