diff --git a/test/py/conftest.py b/test/py/conftest.py
index 09638e64a3e3c60f66f74c734460db341f12a334..446d8cb6faf1e23eab5ac14b1d54037c588b2143 100644
--- a/test/py/conftest.py
+++ b/test/py/conftest.py
@@ -1,7 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
 # Copyright (c) 2015 Stephen Warren
 # Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
-#
-# SPDX-License-Identifier: GPL-2.0
 
 # Implementation of pytest run-time hook functions. These are invoked by
 # pytest at certain points during operation, e.g. startup, for each executed
@@ -17,10 +16,10 @@ import atexit
 import errno
 import os
 import os.path
-import pexpect
 import pytest
 from _pytest.runner import runtestprotocol
 import ConfigParser
+import re
 import StringIO
 import sys
 
@@ -129,10 +128,12 @@ def pytest_configure(config):
             ['make', o_opt, '-s', board_type + '_defconfig'],
             ['make', o_opt, '-s', '-j8'],
         )
-        runner = log.get_runner('make', sys.stdout)
-        for cmd in cmds:
-            runner.run(cmd, cwd=source_dir)
-        runner.close()
+        with log.section('make'):
+            runner = log.get_runner('make', sys.stdout)
+            for cmd in cmds:
+                runner.run(cmd, cwd=source_dir)
+            runner.close()
+            log.status_pass('OK')
 
     class ArbitraryAttributeContainer(object):
         pass
@@ -177,6 +178,7 @@ def pytest_configure(config):
     ubconfig.board_type = board_type
     ubconfig.board_identity = board_identity
     ubconfig.gdbserver = gdbserver
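+    # Location of the test device tree built as part of the sandbox build;
+    # the sandbox console uses it when spawning the U-Boot binary.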
+    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'
 
     env_vars = (
         'board_type',
@@ -190,15 +192,49 @@ def pytest_configure(config):
     for v in env_vars:
         os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)
 
-    if board_type == 'sandbox':
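+    # Match any sandbox variant (e.g. sandbox_spl or sandbox_flattree), not
+    # just plain 'sandbox'; all of them run locally via the sandbox console.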
+    if board_type.startswith('sandbox'):
         import u_boot_console_sandbox
         console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
     else:
         import u_boot_console_exec_attach
         console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
 
-def pytest_generate_tests(metafunc):
-    """pytest hook: parameterize test functions based on custom rules.
+re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
+def generate_ut_subtest(metafunc, fixture_name):
+    """Provide parametrization for a ut_subtest fixture.
+
+    Determines the set of unit tests built into a U-Boot binary by parsing the
+    list of symbols generated by the build process. Provides this information
+    to test functions by parameterizing their ut_subtest fixture parameter.
+
+    Args:
+        metafunc: The pytest test function.
+        fixture_name: The name of the fixture to parametrize.
+
+    Returns:
+        Nothing.
+    """
+
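+    # For illustration, assuming the usual linker-list symbol naming: a
+    # u-boot.sym line such as
+    #   0000000000274faa D _u_boot_list_2_dm_test_2_dm_test_autobind
+    # yields the value 'dm autobind' and the test ID 'ut_dm_autobind'.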
+    fn = console.config.build_dir + '/u-boot.sym'
+    try:
+        with open(fn, 'rt') as f:
+            lines = f.readlines()
+    except:
+        lines = []
+    lines.sort()
+
+    vals = []
+    for l in lines:
+        m = re_ut_test_list.search(l)
+        if not m:
+            continue
+        vals.append(m.group(1) + ' ' + m.group(2))
+
+    ids = ['ut_' + s.replace(' ', '_') for s in vals]
+    metafunc.parametrize(fixture_name, vals, ids=ids)
+
+def generate_config(metafunc, fixture_name):
+    """Provide parametrization for {env,brd}__ fixtures.
 
     If a test function takes parameter(s) (fixture names) of the form brd__xxx
     or env__xxx, the brd and env configuration dictionaries are consulted to
@@ -207,6 +243,7 @@ def pytest_generate_tests(metafunc):
 
     Args:
         metafunc: The pytest test function.
+        fixture_name: The name of the fixture to parametrize.
 
     Returns:
         Nothing.
@@ -216,30 +253,75 @@ def pytest_generate_tests(metafunc):
         'brd': console.config.brd,
         'env': console.config.env,
     }
+    parts = fixture_name.split('__')
+    if len(parts) < 2:
+        return
+    if parts[0] not in subconfigs:
+        return
+    subconfig = subconfigs[parts[0]]
+    vals = []
+    val = subconfig.get(fixture_name, [])
+    # If that exact name is a key in the data source:
+    if val:
+        # ... use the dict value as a single parameter value.
+        vals = (val, )
+    else:
+        # ... otherwise, see if there's a key that contains a list of
+        # values to use instead.
+        vals = subconfig.get(fixture_name + 's', [])
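+    # For illustration with hypothetical keys: a fixture named env__foo whose
+    # exact name appears in the environment file yields a single
+    # parametrization with that value, whereas env__bar with no such key falls
+    # back to the list stored under env__bars, one parametrization per entry.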
+    def fixture_id(index, val):
+        try:
+            return val['fixture_id']
+        except:
+            return fixture_name + str(index)
+    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
+    metafunc.parametrize(fixture_name, vals, ids=ids)
+
+def pytest_generate_tests(metafunc):
+    """pytest hook: parameterize test functions based on custom rules.
+
+    Check each test function parameter (fixture name) to see if it is one of
+    our custom names, and if so, provide the correct parametrization for that
+    parameter.
+
+    Args:
+        metafunc: The pytest test function.
+
+    Returns:
+        Nothing.
+    """
+
     for fn in metafunc.fixturenames:
-        parts = fn.split('__')
-        if len(parts) < 2:
+        if fn == 'ut_subtest':
+            generate_ut_subtest(metafunc, fn)
             continue
-        if parts[0] not in subconfigs:
-            continue
-        subconfig = subconfigs[parts[0]]
-        vals = []
-        val = subconfig.get(fn, [])
-        # If that exact name is a key in the data source:
-        if val:
-            # ... use the dict value as a single parameter value.
-            vals = (val, )
-        else:
-            # ... otherwise, see if there's a key that contains a list of
-            # values to use instead.
-            vals = subconfig.get(fn + 's', [])
-        def fixture_id(index, val):
-            try:
-                return val["fixture_id"]
-            except:
-                return fn + str(index)
-        ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
-        metafunc.parametrize(fn, vals, ids=ids)
+        generate_config(metafunc, fn)
+
+@pytest.fixture(scope='session')
+def u_boot_log(request):
+     """Generate the value of a test's log fixture.
+
+     Args:
+         request: The pytest request.
+
+     Returns:
+         The fixture value.
+     """
+
+     return console.log
+
+@pytest.fixture(scope='session')
+def u_boot_config(request):
+     """Generate the value of a test's u_boot_config fixture.
+
+     Args:
+         request: The pytest request.
+
+     Returns:
+         The fixture value.
+     """
+
+     return console.config
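+
+# For reference, a hypothetical test (not part of this file) receives these
+# session-scoped fixtures simply by naming them as parameters, e.g.:
+#   def test_example(u_boot_console, u_boot_config):
+#       assert u_boot_config.build_dir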
 
 @pytest.fixture(scope='function')
 def u_boot_console(request):
@@ -255,12 +337,14 @@ def u_boot_console(request):
     console.ensure_spawned()
     return console
 
-tests_not_run = set()
-tests_failed = set()
-tests_xpassed = set()
-tests_xfailed = set()
-tests_skipped = set()
-tests_passed = set()
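+# Maps test names to the log anchor of each test's section, so the status
+# report written by cleanup() can refer back to the relevant log output.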
+anchors = {}
+tests_not_run = []
+tests_failed = []
+tests_xpassed = []
+tests_xfailed = []
+tests_skipped = []
+tests_warning = []
+tests_passed = []
 
 def pytest_itemcollected(item):
     """pytest hook: Called once for each test found during collection.
@@ -275,7 +359,7 @@ def pytest_itemcollected(item):
         Nothing.
     """
 
-    tests_not_run.add(item.name)
+    tests_not_run.append(item.name)
 
 def cleanup():
     """Clean up all global state.
@@ -294,27 +378,38 @@ def cleanup():
     if console:
         console.close()
     if log:
-        log.status_pass('%d passed' % len(tests_passed))
-        if tests_skipped:
-            log.status_skipped('%d skipped' % len(tests_skipped))
-            for test in tests_skipped:
-                log.status_skipped('... ' + test)
-        if tests_xpassed:
-            log.status_xpass('%d xpass' % len(tests_xpassed))
-            for test in tests_xpassed:
-                log.status_xpass('... ' + test)
-        if tests_xfailed:
-            log.status_xfail('%d xfail' % len(tests_xfailed))
-            for test in tests_xfailed:
-                log.status_xfail('... ' + test)
-        if tests_failed:
-            log.status_fail('%d failed' % len(tests_failed))
-            for test in tests_failed:
-                log.status_fail('... ' + test)
-        if tests_not_run:
-            log.status_fail('%d not run' % len(tests_not_run))
-            for test in tests_not_run:
-                log.status_fail('... ' + test)
+        with log.section('Status Report', 'status_report'):
+            log.status_pass('%d passed' % len(tests_passed))
+            if tests_warning:
+                log.status_warning('%d passed with warning' % len(tests_warning))
+                for test in tests_warning:
+                    anchor = anchors.get(test, None)
+                    log.status_warning('... ' + test, anchor)
+            if tests_skipped:
+                log.status_skipped('%d skipped' % len(tests_skipped))
+                for test in tests_skipped:
+                    anchor = anchors.get(test, None)
+                    log.status_skipped('... ' + test, anchor)
+            if tests_xpassed:
+                log.status_xpass('%d xpass' % len(tests_xpassed))
+                for test in tests_xpassed:
+                    anchor = anchors.get(test, None)
+                    log.status_xpass('... ' + test, anchor)
+            if tests_xfailed:
+                log.status_xfail('%d xfail' % len(tests_xfailed))
+                for test in tests_xfailed:
+                    anchor = anchors.get(test, None)
+                    log.status_xfail('... ' + test, anchor)
+            if tests_failed:
+                log.status_fail('%d failed' % len(tests_failed))
+                for test in tests_failed:
+                    anchor = anchors.get(test, None)
+                    log.status_fail('... ' + test, anchor)
+            if tests_not_run:
+                log.status_fail('%d not run' % len(tests_not_run))
+                for test in tests_not_run:
+                    anchor = anchors.get(test, None)
+                    log.status_fail('... ' + test, anchor)
         log.close()
 atexit.register(cleanup)
 
@@ -339,12 +434,12 @@ def setup_boardspec(item):
     for board in mark.args:
         if board.startswith('!'):
             if ubconfig.board_type == board[1:]:
-                pytest.skip('board not supported')
+                pytest.skip('board "%s" not supported' % ubconfig.board_type)
                 return
         else:
             required_boards.append(board)
     if required_boards and ubconfig.board_type not in required_boards:
-        pytest.skip('board not supported')
+        pytest.skip('board "%s" not supported' % ubconfig.board_type)
 
 def setup_buildconfigspec(item):
     """Process any 'buildconfigspec' marker for a test.
@@ -365,7 +460,38 @@ def setup_buildconfigspec(item):
         return
     for option in mark.args:
         if not ubconfig.buildconfig.get('config_' + option.lower(), None):
-            pytest.skip('.config feature not enabled')
+            pytest.skip('.config feature "%s" not enabled' % option.lower())
+
+def tool_is_in_path(tool):
+    for path in os.environ["PATH"].split(os.pathsep):
+        fn = os.path.join(path, tool)
+        if os.path.isfile(fn) and os.access(fn, os.X_OK):
+            return True
+    return False
+
+def setup_requiredtool(item):
+    """Process any 'requiredtool' marker for a test.
+
+    Such a marker lists some external tool (binary, executable, application)
+    that the test requires. If tests are being executed on a system that
+    doesn't have the required tool, the test is marked to be skipped.
+
+    Args:
+        item: The pytest test item.
+
+    Returns:
+        Nothing.
+    """
+
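+    # For illustration, a hypothetical test would declare its dependency as:
+    #   @pytest.mark.requiredtool('dtc')
+    #   def test_needs_dtc(u_boot_console):
+    #       ...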
+    mark = item.get_marker('requiredtool')
+    if not mark:
+        return
+    for tool in mark.args:
+        if not tool_is_in_path(tool):
+            pytest.skip('tool "%s" not in $PATH' % tool)
+
+def start_test_section(item):
+    anchors[item.name] = log.start_section(item.name)
 
 def pytest_runtest_setup(item):
     """pytest hook: Configure (set up) a test item.
@@ -380,9 +506,10 @@ def pytest_runtest_setup(item):
         Nothing.
     """
 
-    log.start_section(item.name)
+    start_test_section(item)
     setup_boardspec(item)
     setup_buildconfigspec(item)
+    setup_requiredtool(item)
 
 def pytest_runtest_protocol(item, nextitem):
     """pytest hook: Called to execute a test.
@@ -398,12 +525,27 @@ def pytest_runtest_protocol(item, nextitem):
         A list of pytest reports (test result data).
     """
 
+    log.get_and_reset_warning()
     reports = runtestprotocol(item, nextitem=nextitem)
+    was_warning = log.get_and_reset_warning()
+
+    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
+    # the test is skipped. That call is required to create the test's section
+    # in the log file. The call to log.end_section() requires that the log
+    # contain a section for this test. Create a section for the test if it
+    # doesn't already exist.
+    if item.name not in anchors:
+        start_test_section(item)
 
     failure_cleanup = False
-    test_list = tests_passed
-    msg = 'OK'
-    msg_log = log.status_pass
+    if not was_warning:
+        test_list = tests_passed
+        msg = 'OK'
+        msg_log = log.status_pass
+    else:
+        test_list = tests_warning
+        msg = 'OK (with warning)'
+        msg_log = log.status_warning
     for report in reports:
         if report.outcome == 'failed':
             if hasattr(report, 'wasxfail'):
@@ -430,7 +572,7 @@ def pytest_runtest_protocol(item, nextitem):
     if failure_cleanup:
         console.drain_console()
 
-    test_list.add(item.name)
+    test_list.append(item.name)
     tests_not_run.remove(item.name)
 
     try: