#coding:utf-8
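"""Run the Petrel unit test suite.

Example invocations (mirroring the argparse help strings in parse_args below):
    python run_test.py                # run every test module
    python run_test.py -pt            # run the suite under pytest
    python run_test.py -i test_config -- -s test_report.log
"""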
import os
import sys
import argparse
import copy
from datetime import datetime
import common_util
import signal
test_dir = os.path.dirname(os.path.realpath(__file__))
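# Test modules selectable with -i/--include; each entry is expected to have a
# matching <name>.py next to this script (run_test executes '<module>.py').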
TESTS = ['test_config', 'test_read']
# Tests that need to be run with pytest.
USE_PYTEST_LIST = []
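# Optional per-module overrides: map a test name to a callable with the same
# signature as run_test below; run_test_module falls back to run_test when no
# entry is present.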
CUSTOM_HANDLERS = {}
def print_to_stderr(message):
    print(message, file=sys.stderr)
def parse_test_module(test):
    # 'test_config.TestConfig.test_name' -> 'test_config'
    return test.split('.')[0]
class TestChoices(list):
    def __init__(self, *args, **kwargs):
        super(TestChoices, self).__init__(args[0])
    def __contains__(self, item):
        return list.__contains__(self, parse_test_module(item))
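# Because TestChoices validates only the module portion, -i/--include also
# accepts qualified names such as 'test_config.TestConfig.test_name';
# run_test_module reduces them to the module via parse_test_module.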
def parse_args():
    parser = argparse.ArgumentParser(
        description='Run the Petrel unit test suite',
        epilog='where TESTS is any of: {}'.format(', '.join(TESTS)))
    parser.add_argument(
        '-pt',
        '--pytest',
        action='store_true',
        help='If set, use `pytest` to execute the tests, e.g., '
        'python run_test.py -pt')
    parser.add_argument(
        '-i',
        '--include',
        nargs='+',
        choices=TestChoices(TESTS),
        default=TESTS,
        metavar='TESTS',
        help='select a set of tests to include (defaults to ALL tests).'
        ' Tests are specified by module name')
    parser.add_argument('-x',
                        '--exclude',
                        nargs='+',
                        choices=TESTS,
                        metavar='TESTS',
                        default=[],
                        help='select a set of tests to exclude')
    parser.add_argument(
        '--continue-through-error',
        action='store_true',
        help='Runs the full test suite despite one of the tests failing')
    parser.add_argument(
        'additional_unittest_args',
        nargs='*',
        help='additional arguments passed through to unittest, e.g., '
        'python run_test.py -i test_config -- -s test_report.log'
        ' to save the test report in test_report.log')
    return parser.parse_args()
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
    for exclude_test in exclude_list:
        tests_copy = selected_tests[:]
        for test in tests_copy:
            if test.startswith(exclude_test):
                if exclude_message is not None:
                    print_to_stderr('Excluding {} {}'.format(
                        test, exclude_message))
                selected_tests.remove(test)
    return selected_tests
def get_selected_tests(options):
    selected_tests = options.include
    selected_tests = exclude_tests(options.exclude, selected_tests)
    return selected_tests
def get_executable_command(options, allow_pytest):
    executable = [sys.executable]
    if options.pytest:
        if allow_pytest:
            executable += ['-m', 'pytest']
        else:
            print_to_stderr(
                'Pytest cannot be used for this test. Falling back to unittest.'
            )
    return executable
def run_test(test_module,
             test_directory,
             options,
             launcher_cmd=None,
             extra_unittest_args=None):
    unittest_args = options.additional_unittest_args.copy()
    if extra_unittest_args:
        assert isinstance(extra_unittest_args, list)
        unittest_args.extend(extra_unittest_args)
    # If using pytest, replace -f with equivalent -x
    if options.pytest:
        unittest_args = [arg if arg != '-f' else '-x' for arg in unittest_args]
    # Can't call `python -m unittest test_*` here because it doesn't run code
    # in `if __name__ == '__main__': `. So call `python test_*.py` instead.
    print(test_module)
    argv = [test_module + '.py'] + unittest_args
    # Extra arguments are not supported with pytest
    executable = get_executable_command(options,
                                        allow_pytest=not extra_unittest_args)
    command = (launcher_cmd or []) + executable + argv
    print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now()))
    return common_util.shell(command, test_directory)
# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
SIGNALS_TO_NAMES_DICT = {
    getattr(signal, n): n
    for n in dir(signal) if n.startswith('SIG') and '_' not in n
}
def run_test_module(test, test_directory, options):
    test_module = parse_test_module(test)
    # Printing the date here can help diagnose which tests are slow
    print_to_stderr('Running {} ... [{}]'.format(test, datetime.now()))
    handler = CUSTOM_HANDLERS.get(test, run_test)
    return_code = handler(test_module, test_directory, options)
    assert isinstance(return_code, int) and not isinstance(
        return_code, bool), 'Return code should be an integer'
    if return_code == 0:
        return None
    message = '{} failed!'.format(test)
    if return_code < 0:
        # subprocess.Popen returns the child process' exit signal as
        # return code -N, where N is the signal number.
        signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
        print(signal_name)
        message += ' Received signal: ' + signal_name
    return message
def main():
    options = parse_args()
    test_directory = os.path.dirname(os.path.abspath(__file__))
    selected_tests = get_selected_tests(options)
    failure_messages = []
    has_failed = False
    for test in selected_tests:
        options_clone = copy.deepcopy(options)
        if test in USE_PYTEST_LIST:
            options_clone.pytest = True
        err_message = run_test_module(test, test_directory, options_clone)
        if err_message is None:
            continue
        has_failed = True
        failure_messages.append(err_message)
        if not options_clone.continue_through_error:
            raise RuntimeError(err_message)
        print_to_stderr(err_message)
    if options.continue_through_error and has_failed:
        for err in failure_messages:
            print_to_stderr(err)
        sys.exit(1)
if __name__ == '__main__':
    main()