# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.

# SPDX-License-Identifier: GPL-2.0

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. startup, for each executed
# test, at shutdown etc. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import ConfigParser
import errno
import os
import os.path
import StringIO
import sys
import traceback

import pytest
from _pytest.runner import runtestprotocol

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None
32 """Create a directory path.
34 This includes creating any intermediate/parent directories. Any errors
35 caused due to already extant directories are ignored.
38 path: The directory path to create.
46 except OSError as exc:
47 if exc.errno == errno.EEXIST and os.path.isdir(path):
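
# Usage sketch (path hypothetical): mkdir_p('/tmp/u-boot-test/a/b') creates
# any missing path components, and calling it again on the same path is a
# no-op rather than an error.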

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
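
# Example invocations, assuming the test.py wrapper in this directory
# (paths hypothetical):
#   ./test/py/test.py --bd sandbox --build
#   ./test/py/test.py --bd seaboard --id left --build-dir /path/to/build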

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        runner = log.get_runner('make', sys.stdout)
        for cmd in cmds:
            runner.run(cmd, cwd=source_dir)
        runner.close()
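
        # For example, building for --board-type sandbox with an out-of-tree
        # build directory runs roughly (path hypothetical):
        #   make O=/path/to/build -s sandbox_defconfig
        #   make O=/path/to/build -s -j8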

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)
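
    # For example, --board-type seaboard --board-identity left would consult,
    # in order: u_boot_board_seaboard.py, u_boot_boardenv_seaboard.py, and
    # u_boot_boardenv_seaboard_left.py (module names illustrative); any that
    # don't exist are simply skipped.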

    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))
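
    # A synthetic '[root]' section header is prepended above because
    # ConfigParser requires one. RawConfigParser lower-cases option names, so
    # a line such as 'CONFIG_CMD_MEMORY=y' is stored as
    # buildconfig['config_cmd_memory'] == 'y', which is the form that
    # setup_buildconfigspec() below looks up.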

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity

    env_vars = (
        'board_type',
        'board_identity',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)
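
    # These U_BOOT_* environment variables are inherited by any external
    # processes the tests spawn, so e.g. a user-supplied hook script (name
    # and usage hypothetical) could consult $U_BOOT_BOARD_TYPE or
    # $U_BOOT_BUILD_DIR to locate the image to flash.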

    if board_type == 'sandbox':
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    for fn in metafunc.fixturenames:
        parts = fn.split('__')
        if len(parts) < 2:
            continue
        if parts[0] not in subconfigs:
            continue
        subconfig = subconfigs[parts[0]]
        val = subconfig.get(fn, [])
        # If that exact name is a key in the data source:
        if val:
            # ... use the dict value as a single parameter value.
            vals = (val, )
        else:
            # ... otherwise, see if there's a key that contains a list of
            # values to use instead.
            vals = subconfig.get(fn + 's', [])
        def fixture_id(index, val):
            try:
                return val['fixture_id']
            except (TypeError, KeyError):
                return fn + str(index)
        ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
        metafunc.parametrize(fn, vals, ids=ids)
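
# As an illustration (all names hypothetical), a board environment module
# containing:
#   env__net_dhcp_server = {'fixture_id': 'dhcp', 'setup_cmd': '...'}
# causes a test declared as:
#   def test_dhcp(u_boot_console, env__net_dhcp_server):
# to run once with that dict as the parameter value; a plural key such as
# env__net_dhcp_servers would instead supply a list of values, yielding one
# test run per entry.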

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console
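
# A minimal sketch of a test consuming this fixture (test body illustrative;
# run_command() is assumed to be provided by the console implementations):
#   def test_version(u_boot_console):
#       response = u_boot_console.run_command('version')
#       assert 'U-Boot' in response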

tests_not_run = set()
tests_failed = set()
tests_passed = set()
tests_xpassed = set()
tests_xfailed = set()
tests_skipped = set()

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.add(item.name)
273 """Clean up all global state.
275 Executed (via atexit) once the entire test process is complete. This
276 includes logging the status of all tests, and the identity of any failed
289 log.status_pass('%d passed' % len(tests_passed))
291 log.status_skipped('%d skipped' % len(tests_skipped))
292 for test in tests_skipped:
293 log.status_skipped('... ' + test)
295 log.status_xpass('%d xpass' % len(tests_xpassed))
296 for test in tests_xpassed:
297 log.status_xpass('... ' + test)
299 log.status_xfail('%d xfail' % len(tests_xfailed))
300 for test in tests_xfailed:
301 log.status_xfail('... ' + test)
303 log.status_fail('%d failed' % len(tests_failed))
304 for test in tests_failed:
305 log.status_fail('... ' + test)
307 log.status_fail('%d not run' % len(tests_not_run))
308 for test in tests_not_run:
309 log.status_fail('... ' + test)
311 atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board not supported')
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board not supported')
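
# Example usage (tests hypothetical): @pytest.mark.boardspec('sandbox')
# restricts a test to the sandbox board, while
# @pytest.mark.boardspec('!seaboard') runs it on every board except seaboard.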

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature not enabled')
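
# For instance, @pytest.mark.buildconfigspec('cmd_memory') (marker argument
# illustrative) skips a test unless CONFIG_CMD_MEMORY is enabled in the
# build's configuration, via the lower-cased 'config_cmd_memory' key parsed
# in pytest_configure() above.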

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    log.start_section(item.name)
    setup_boardspec(item)
    setup_buildconfigspec(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    reports = runtestprotocol(item, nextitem=nextitem)

    failure_cleanup = False
    test_list = tests_passed
    msg = 'OK'
    msg_log = log.status_pass
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.add(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports