X-Git-Url: https://git.librecmc.org/?a=blobdiff_plain;f=test%2Fpy%2Fconftest.py;h=00d8ef8ba99ce76ce4a48df0d81773ef10237892;hb=a1b633df55d5207b49ce145ede783d671385854c;hp=449f98bee39cb1ecc2d7758af18e72e98d7283a0;hpb=595af9db2422fa5ae734cfe615415b17a5098f34;p=oweals%2Fu-boot.git

diff --git a/test/py/conftest.py b/test/py/conftest.py
index 449f98bee3..00d8ef8ba9 100644
--- a/test/py/conftest.py
+++ b/test/py/conftest.py
@@ -1,7 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
 # Copyright (c) 2015 Stephen Warren
 # Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
-#
-# SPDX-License-Identifier: GPL-2.0
 
 # Implementation of pytest run-time hook functions. These are invoked by
 # pytest at certain points during operation, e.g. startup, for each executed
@@ -19,11 +18,15 @@ import os
 import os.path
 import pytest
 from _pytest.runner import runtestprotocol
-import ConfigParser
 import re
 import StringIO
 import sys
 
+try:
+    import configparser
+except:
+    import ConfigParser as configparser
+
 # Globals: The HTML log file, and the connection to the U-Boot console.
 log = None
 console = None
@@ -114,8 +117,8 @@ def pytest_configure(config):
     mkdir_p(persistent_data_dir)
 
     gdbserver = config.getoption('gdbserver')
-    if gdbserver and board_type != 'sandbox':
-        raise Exception('--gdbserver only supported with sandbox')
+    if gdbserver and not board_type.startswith('sandbox'):
+        raise Exception('--gdbserver only supported with sandbox targets')
 
     import multiplexed_log
     log = multiplexed_log.Logfile(result_dir + '/test-log.html')
@@ -167,7 +170,7 @@ def pytest_configure(config):
         with open(dot_config, 'rt') as f:
             ini_str = '[root]\n' + f.read()
             ini_sio = StringIO.StringIO(ini_str)
-            parser = ConfigParser.RawConfigParser()
+            parser = configparser.RawConfigParser()
             parser.readfp(ini_sio)
             ubconfig.buildconfig.update(parser.items('root'))
 
@@ -179,6 +182,7 @@ def pytest_configure(config):
     ubconfig.board_type = board_type
     ubconfig.board_identity = board_identity
     ubconfig.gdbserver = gdbserver
+    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'
 
     env_vars = (
         'board_type',
@@ -192,14 +196,14 @@ def pytest_configure(config):
     for v in env_vars:
         os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)
 
-    if board_type == 'sandbox':
+    if board_type.startswith('sandbox'):
         import u_boot_console_sandbox
         console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
     else:
         import u_boot_console_exec_attach
         console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
 
-re_ut_test_list = re.compile(r'_u_boot_list_2_(dm|env)_test_2_\1_test_(.*)\s*$')
+re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
 
 def generate_ut_subtest(metafunc, fixture_name):
     """Provide parametrization for a ut_subtest fixture.
@@ -297,6 +301,32 @@ def pytest_generate_tests(metafunc):
             continue
         generate_config(metafunc, fn)
 
+@pytest.fixture(scope='session')
+def u_boot_log(request):
+    """Generate the value of a test's log fixture.
+
+    Args:
+        request: The pytest request.
+
+    Returns:
+        The fixture value.
+    """
+
+    return console.log
+
+@pytest.fixture(scope='session')
+def u_boot_config(request):
+    """Generate the value of a test's u_boot_config fixture.
+
+    Args:
+        request: The pytest request.
+
+    Returns:
+        The fixture value.
+    """
+
+    return console.config
+
 @pytest.fixture(scope='function')
 def u_boot_console(request):
     """Generate the value of a test's u_boot_console fixture.
@@ -317,6 +347,7 @@ tests_failed = []
 tests_xpassed = []
 tests_xfailed = []
 tests_skipped = []
+tests_warning = []
 tests_passed = []
 
 def pytest_itemcollected(item):
@@ -353,6 +384,11 @@ def cleanup():
     if log:
         with log.section('Status Report', 'status_report'):
             log.status_pass('%d passed' % len(tests_passed))
+            if tests_warning:
+                log.status_warning('%d passed with warning' % len(tests_warning))
+                for test in tests_warning:
+                    anchor = anchors.get(test, None)
+                    log.status_warning('... ' + test, anchor)
             if tests_skipped:
                 log.status_skipped('%d skipped' % len(tests_skipped))
                 for test in tests_skipped:
@@ -402,12 +438,12 @@ def setup_boardspec(item):
     for board in mark.args:
         if board.startswith('!'):
             if ubconfig.board_type == board[1:]:
-                pytest.skip('board not supported')
+                pytest.skip('board "%s" not supported' % ubconfig.board_type)
                 return
         else:
             required_boards.append(board)
     if required_boards and ubconfig.board_type not in required_boards:
-        pytest.skip('board not supported')
+        pytest.skip('board "%s" not supported' % ubconfig.board_type)
 
 def setup_buildconfigspec(item):
     """Process any 'buildconfigspec' marker for a test.
@@ -424,11 +460,46 @@
     """
 
     mark = item.get_marker('buildconfigspec')
+    if mark:
+        for option in mark.args:
+            if not ubconfig.buildconfig.get('config_' + option.lower(), None):
+                pytest.skip('.config feature "%s" not enabled' % option.lower())
+    notmark = item.get_marker('notbuildconfigspec')
+    if notmark:
+        for option in notmark.args:
+            if ubconfig.buildconfig.get('config_' + option.lower(), None):
+                pytest.skip('.config feature "%s" enabled' % option.lower())
+
+def tool_is_in_path(tool):
+    for path in os.environ["PATH"].split(os.pathsep):
+        fn = os.path.join(path, tool)
+        if os.path.isfile(fn) and os.access(fn, os.X_OK):
+            return True
+    return False
+
+def setup_requiredtool(item):
+    """Process any 'requiredtool' marker for a test.
+
+    Such a marker lists some external tool (binary, executable, application)
+    that the test requires. If tests are being executed on a system that
+    doesn't have the required tool, the test is marked to be skipped.
+
+    Args:
+        item: The pytest test item.
+
+    Returns:
+        Nothing.
+    """
+
+    mark = item.get_marker('requiredtool')
     if not mark:
         return
-    for option in mark.args:
-        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
-            pytest.skip('.config feature not enabled')
+    for tool in mark.args:
+        if not tool_is_in_path(tool):
+            pytest.skip('tool "%s" not in $PATH' % tool)
+
+def start_test_section(item):
+    anchors[item.name] = log.start_section(item.name)
 
 def pytest_runtest_setup(item):
     """pytest hook: Configure (set up) a test item.
@@ -443,9 +514,10 @@
         Nothing.
     """
 
-    anchors[item.name] = log.start_section(item.name)
+    start_test_section(item)
     setup_boardspec(item)
     setup_buildconfigspec(item)
+    setup_requiredtool(item)
 
 def pytest_runtest_protocol(item, nextitem):
     """pytest hook: Called to execute a test.
@@ -461,12 +533,27 @@
         A list of pytest reports (test result data).
     """
 
+    log.get_and_reset_warning()
     reports = runtestprotocol(item, nextitem=nextitem)
+    was_warning = log.get_and_reset_warning()
+
+    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
+    # the test is skipped. That call is required to create the test's section
+    # in the log file. The call to log.end_section() requires that the log
+    # contain a section for this test. Create a section for the test if it
+    # doesn't already exist.
+    if not item.name in anchors:
+        start_test_section(item)
 
     failure_cleanup = False
-    test_list = tests_passed
-    msg = 'OK'
-    msg_log = log.status_pass
+    if not was_warning:
+        test_list = tests_passed
+        msg = 'OK'
+        msg_log = log.status_pass
+    else:
+        test_list = tests_warning
+        msg = 'OK (with warning)'
+        msg_log = log.status_warning
     for report in reports:
         if report.outcome == 'failed':
             if hasattr(report, 'wasxfail'):
@@ -507,7 +594,7 @@
         # is fixed, if this exception still exists, it will then be logged as
         # part of the test's stdout.
         import traceback
-        print 'Exception occurred while logging runtest status:'
+        print('Exception occurred while logging runtest status:')
         traceback.print_exc()
         # FIXME: Can we force a test failure here?
 
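For context, the sketch below shows how a test module might consume the markers and fixtures this diff introduces (buildconfigspec, notbuildconfigspec, requiredtool, and the session-scoped u_boot_config fixture). It is not part of the patch: the test name, the CONFIG option names, and the console command are hypothetical examples; only the marker and fixture names come from the code above.

# test_example.py -- illustrative only, not part of this commit.
import pytest

@pytest.mark.buildconfigspec('cmd_memory')   # skipped unless CONFIG_CMD_MEMORY is set (assumed option)
@pytest.mark.notbuildconfigspec('spl')       # skipped if CONFIG_SPL is set (assumed option)
@pytest.mark.requiredtool('dtc')             # skipped by setup_requiredtool() if 'dtc' is not in $PATH
def test_example(u_boot_console, u_boot_config):
    # u_boot_config returns console.config, i.e. the ubconfig object filled in
    # by pytest_configure() above; buildconfig maps lower-cased .config keys
    # such as 'config_cmd_memory' to their values.
    assert u_boot_config.buildconfig.get('config_cmd_memory')
    # Run a command on the (sandbox or attached) U-Boot console; the exact
    # command here is only an example.
    u_boot_console.run_command('md.b 0 10')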