X-Git-Url: https://git.librecmc.org/?a=blobdiff_plain;f=tools%2Fbinman%2Fbinman.py;h=52c03f68c6d0c743250ae4ebb39ab26bfc62a252;hb=8acce60b10f2d60945b71f527fd29ee62242b175;hp=439908e66508045e78563617304e3d36dffe9417;hpb=3d5ced9e22d32112a20f9dc0f5fb1f22ef088079;p=oweals%2Fu-boot.git

diff --git a/tools/binman/binman.py b/tools/binman/binman.py
index 439908e665..52c03f68c6 100755
--- a/tools/binman/binman.py
+++ b/tools/binman/binman.py
@@ -9,9 +9,13 @@
 
 """See README for more information"""
 
+from __future__ import print_function
+
+from distutils.sysconfig import get_python_lib
 import glob
 import multiprocessing
 import os
+import site
 import sys
 import traceback
 import unittest
@@ -26,6 +30,12 @@ sys.path.insert(0, 'scripts/dtc/pylibfdt')
 sys.path.insert(0, os.path.join(our_path,
                 '../../build-sandbox_spl/scripts/dtc/pylibfdt'))
 
+# When running under python-coverage on Ubuntu 16.04, the dist-packages
+# directories are dropped from the python path. Add them in so that we can find
+# the elffile module. We could use site.getsitepackages() here but unfortunately
+# that is not available in a virtualenv.
+sys.path.append(get_python_lib())
+
 import cmdline
 import command
 use_concurrent = True
@@ -36,14 +46,21 @@
 import control
 import test_util
 
-def RunTests(debug, processes, args):
+def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
     """Run the functional tests and any embedded doctests
 
     Args:
         debug: True to enable debugging, which shows a full stack trace on error
+        verbosity: Verbosity level to use
+        test_preserve_dirs: True to preserve the input directory used by tests
+            so that it can be examined afterwards (only useful for debugging
+            tests). If a single test is selected (in args[0]) it also preserves
+            the output directory for this test. Both directories are displayed
+            on the command line.
+        processes: Number of processes to use to run tests (None=same as #CPUs)
         args: List of positional args provided to binman. This can hold a test
            name to execute (as in 'binman -t testSections', for example)
-        processes: Number of processes to use to run tests (None=same as #CPUs)
+        toolpath: List of paths to use for tools
     """
     import elf_test
     import entry_test
@@ -61,8 +78,11 @@
     sys.argv = [sys.argv[0]]
     if debug:
         sys.argv.append('-D')
-    if debug:
-        sys.argv.append('-D')
+    if verbosity:
+        sys.argv.append('-v%d' % verbosity)
+    if toolpath:
+        for path in toolpath:
+            sys.argv += ['--toolpath', path]
 
     # Run the entry tests first ,since these need to be the first to import the
     # 'entry' module.
@@ -71,6 +91,12 @@
     loader = unittest.TestLoader()
     for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
                    elf_test.TestElf, image_test.TestImage):
+        # Tell the test module about our arguments, if it is interested
+        if hasattr(module, 'setup_test_args'):
+            setup_test_args = getattr(module, 'setup_test_args')
+            setup_test_args(preserve_indir=test_preserve_dirs,
+                preserve_outdirs=test_preserve_dirs and test_name is not None,
+                toolpath=toolpath)
         if test_name:
             try:
                 suite.addTests(loader.loadTestsFromName(test_name, module))
@@ -85,14 +111,31 @@
     else:
         suite.run(result)
 
-    print result
+    # Remove errors which just indicate a missing test. Since Python v3.5,
+    # if an ImportError or AttributeError occurs while traversing name then
+    # a synthetic test that raises that error when run will be returned.
+    # These errors are included in the errors accumulated by result.errors.
+    if test_name:
+        errors = []
+        for test, err in result.errors:
+            if ("has no attribute '%s'" % test_name) not in err:
+                errors.append((test, err))
+                result.testsRun -= 1
+        result.errors = errors
+
+    print(result)
     for test, err in result.errors:
-        print test.id(), err
+        print(test.id(), err)
     for test, err in result.failures:
-        print err, result.failures
+        print(err, result.failures)
+    if result.skipped:
+        print('%d binman test%s SKIPPED:' %
+              (len(result.skipped), 's' if len(result.skipped) > 1 else ''))
+        for skip_info in result.skipped:
+            print('%s: %s' % (skip_info[0], skip_info[1]))
     if result.errors or result.failures:
-        print 'binman tests FAILED'
-        return 1
+        print('binman tests FAILED')
+        return 1
     return 0
 
 def GetEntryModules(include_testing=True):
@@ -124,14 +167,13 @@
     """
     ret_code = 0
 
-    # For testing: This enables full exception traces.
-    #options.debug = True
-
     if not options.debug:
         sys.tracebacklimit = 0
 
     if options.test:
-        ret_code = RunTests(options.debug, options.processes, args[1:])
+        ret_code = RunTests(options.debug, options.verbosity, options.processes,
+                            options.test_preserve_dirs, args[1:],
+                            options.toolpath)
 
     elif options.test_coverage:
         RunTestCoverage()
@@ -143,9 +185,9 @@
         try:
            ret_code = control.Binman(options, args)
         except Exception as e:
-            print 'binman: %s' % e
+            print('binman: %s' % e)
             if options.debug:
-                print
+                print()
             traceback.print_exc()
             ret_code = 1
     return ret_code
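Note on the setup_test_args() hook added above: binman.py only discovers it with hasattr()/getattr() and calls it with the preserve_indir, preserve_outdirs and toolpath keyword arguments, so any test module that wants these settings has to expose a matching callable. A minimal sketch of what such a hook could look like follows; the class body is illustrative and assumed, not copied from the U-Boot test modules.

    import unittest

    class TestFunctional(unittest.TestCase):
        """Example test module; binman.py calls setup_test_args() if present."""

        @classmethod
        def setup_test_args(cls, preserve_indir=False, preserve_outdirs=False,
                            toolpath=None):
            # Stash the settings on the class so individual tests can read
            # them, e.g. to skip cleanup of their input/output directories
            # or to locate external tools. (Illustrative only.)
            cls.preserve_indir = preserve_indir
            cls.preserve_outdirs = preserve_outdirs
            cls.toolpath = toolpath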