2 # SPDX-License-Identifier: GPL-2.0+
4 # Copyright (c) 2016 Google, Inc
5 # Written by Simon Glass <sjg@chromium.org>
7 # Creates binary images from input files controlled by a description
10 """See README for more information"""
12 from __future__ import print_function
14 from distutils.sysconfig import get_python_lib
16 import multiprocessing
23 # Bring in the patman and dtoc libraries
24 our_path = os.path.dirname(os.path.realpath(__file__))
25 for dirname in ['../patman', '../dtoc', '..', '../concurrencytest']:
26 sys.path.insert(0, os.path.join(our_path, dirname))
28 # Bring in the libfdt module
29 sys.path.insert(0, 'scripts/dtc/pylibfdt')
30 sys.path.insert(0, os.path.join(our_path,
31 '../../build-sandbox_spl/scripts/dtc/pylibfdt'))
33 # When running under python-coverage on Ubuntu 16.04, the dist-packages
34 # directories are dropped from the python path. Add them in so that we can find
35 # the elffile module. We could use site.getsitepackages() here but unfortunately
36 # that is not available in a virtualenv.
37 sys.path.append(get_python_lib())
43 from concurrencytest import ConcurrentTestSuite, fork_for_tests
# NOTE(review): the embedded original-file line numbers jump 43 -> 45 here,
# so at least one line is missing from this extraction.  Presumably the
# import above is wrapped in try/except in the full file, with
# use_concurrent set True on success and False when concurrencytest is not
# available -- confirm against the complete source.
45 use_concurrent = False
# NOTE(review): this function is a lossy extraction -- the embedded
# original-file line numbers (49, 50, 53, ... with gaps) show that several
# statements are missing from view, including the try: that must pair with
# the 'except AttributeError:' below, the sequential (non-concurrent) run
# branch, and the final result-reporting/return lines.  Code is kept
# byte-identical; only comments are added or corrected.
49 def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
50 """Run the functional tests and any embedded doctests
53 debug: True to enable debugging, which shows a full stack trace on error
54 verbosity: Verbosity level to use
55 test_preserve_dirs: True to preserve the input directory used by tests
56 so that it can be examined afterwards (only useful for debugging
57 tests). If a single test is selected (in args[0]) it also preserves
58 the output directory for this test. Both directories are displayed
60 processes: Number of processes to use to run tests (None=same as #CPUs)
61 args: List of positional args provided to binman. This can hold a test
62 name to execute (as in 'binman -t testSections', for example)
63 toolpath: List of paths to use for tools
# Accumulates outcomes from all suites run below (errors/failures/skipped)
73 result = unittest.TestResult()
# Collect doctests embedded in a module's docstrings
75 suite = doctest.DocTestSuite(module)
# Reset sys.argv so the tests do not see binman's own command-line options;
# then re-append only the flags the tests understand (verbosity, toolpath)
78 sys.argv = [sys.argv[0]]
82 sys.argv.append('-v%d' % verbosity)
85 sys.argv += ['--toolpath', path]
87 # Run the entry tests first, since these need to be the first to import the
# args[0], when present, names a single test to run; otherwise run them all
89 test_name = args and args[0] or None
90 suite = unittest.TestSuite()
91 loader = unittest.TestLoader()
92 for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
93 elf_test.TestElf, image_test.TestImage):
94 # Tell the test module about our arguments, if it is interested
95 if hasattr(module, 'setup_test_args'):
96 setup_test_args = getattr(module, 'setup_test_args')
97 setup_test_args(preserve_indir=test_preserve_dirs,
98 preserve_outdirs=test_preserve_dirs and test_name is not None,
# NOTE(review): the enclosing 'try:' for the next line is missing from this
# extraction; loadTestsFromName raises AttributeError when the named test
# is not in this module, which is handled below by falling back to loading
# the whole test case
102 suite.addTests(loader.loadTestsFromName(test_name, module))
103 except AttributeError:
106 suite.addTests(loader.loadTestsFromTestCase(module))
# Fork one worker per CPU (or 'processes') unless concurrency is disabled
107 if use_concurrent and processes != 1:
108 concurrent_suite = ConcurrentTestSuite(suite,
109 fork_for_tests(processes or multiprocessing.cpu_count()))
110 concurrent_suite.run(result)
114 # Remove errors which just indicate a missing test. Since Python v3.5, if an
115 # ImportError or AttributeError occurs while traversing name then a
116 # synthetic test that raises that error when run will be returned. These
117 # errors are included in the errors accumulated by result.errors.
120 for test, err in result.errors:
121 if ("has no attribute '%s'" % test_name) not in err:
122 errors.append((test, err))
124 result.errors = errors
# Print a summary of errors, failures and skips for the user
127 for test, err in result.errors:
128 print(test.id(), err)
129 for test, err in result.failures:
130 print(err, result.failures)
132 print('%d binman test%s SKIPPED:' %
133 (len(result.skipped), 's' if len(result.skipped) > 1 else ''))
134 for skip_info in result.skipped:
135 print('%s: %s' % (skip_info[0], skip_info[1]))
136 if result.errors or result.failures:
137 print('binman tests FAILED')
def GetEntryModules(include_testing=True):
    """Get a set of entry class implementations

    Scans the etype/ directory next to this script for entry-class source
    files.

    Args:
        include_testing: True to include the '_testing' modules used only by
            the test suite; False to exclude them

    Returns:
        Set of entry-module names: the file basenames with the .py extension
        stripped (e.g. 'u_boot_spl'), not full paths -- the original
        docstring's claim of 'paths to entry class filenames' was wrong
    """
    glob_list = glob.glob(os.path.join(our_path, 'etype/*.py'))
    # Set comprehension instead of set([...]); strip directory and extension
    # to leave just the module name
    return {os.path.splitext(os.path.basename(item))[0]
            for item in glob_list
            if include_testing or '_testing' not in item}
def RunTestCoverage():
    """Run the tests and check that we get 100% coverage

    Runs binman's test suite under coverage, requiring the non-testing entry
    modules to be fully covered.  Relies on the module-global 'options'
    (set in the __main__ block) for the build directory.
    """
    # GetEntryModules(False) already returns extension-less module basenames
    # with the '_testing' modules excluded, so the original's second pass of
    # basename/splitext/'_testing' filtering was a no-op and is dropped
    all_set = GetEntryModules(False)
    test_util.RunTestCoverage('tools/binman/binman.py', None,
            ['*test*', '*binman.py', 'tools/patman/*', 'tools/dtoc/*'],
            options.build_dir, all_set)
# NOTE(review): this function is a lossy extraction -- the embedded
# original-file line numbers (161, 162, 165, ... with gaps) show missing
# statements, including the 'if options.test:' line that must precede the
# RunTests call, the branch bodies for test_coverage, the 'try:' paired
# with 'except Exception' below, and the final 'return ret_code'.  Code is
# kept byte-identical; only comments are added.
161 def RunBinman(options, args):
162 """Main entry point to binman once arguments are parsed
165 options: Command-line options
166 args: Non-option arguments
# Suppress tracebacks for ordinary users; full traces only with --debug
170 if not options.debug:
171 sys.tracebacklimit = 0
# Test mode: forward the relevant options to the test runner
174 ret_code = RunTests(options.debug, options.verbosity, options.processes,
175 options.test_preserve_dirs, args[1:],
178 elif options.test_coverage:
181 elif options.entry_docs:
182 control.WriteEntryDocs(GetEntryModules())
# Normal operation: build the image(s) described by the input files
186 ret_code = control.Binman(options, args)
187 except Exception as e:
# Report the error; with --debug also print the full traceback
188 print('binman: %s' % e)
191 traceback.print_exc()
# Script entry point: parse the command line and run binman.
# NOTE(review): the embedded numbering ends at 198; the full file presumably
# follows with 'sys.exit(ret_code)' -- confirm against the complete source.
196 if __name__ == "__main__":
197 (options, args) = cmdline.ParseArgs(sys.argv)
198 ret_code = RunBinman(options, args)