#!/usr/bin/env python3
#
# Script to compile and run tests.
#
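# A rough sketch of typical invocations (the .toml path, test id, and define
# values below are illustrative, not prescriptive):
#
#   # compile a suite's .toml into a C source for the test runner
#   ./scripts/test.py -c tests/test_example.toml -o tests/test_example.t.c
#
#   # run all tests with 4 parallel jobs, stepping by suite
#   ./scripts/test.py runners/test_runner -j4 -b
#
#   # run one case with an overridden define, dropping into gdb on failure
#   ./scripts/test.py runners/test_runner example:case -DN=4 --gdb
#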
import collections as co
import errno
import glob
import itertools as it
import math as m
import os
import pty
import re
import shlex
import shutil
import signal
import subprocess as sp
import sys
import threading as th
import time
import toml
RUNNER_PATH = 'runners/test_runner'
HEADER_PATH = 'runners/test_runner.h'
def openio(path, mode='r', buffering=-1, nb=False):
if path == '-':
if 'r' in mode:
return os.fdopen(os.dup(sys.stdin.fileno()), 'r', buffering)
else:
return os.fdopen(os.dup(sys.stdout.fileno()), 'w', buffering)
elif nb and 'a' in mode:
return os.fdopen(os.open(
path,
os.O_WRONLY | os.O_CREAT | os.O_APPEND | os.O_NONBLOCK,
0o666),
mode,
buffering)
else:
return open(path, mode, buffering)
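# Note openio treats '-' as stdin/stdout, and with nb=True in append mode it
# opens the file non-blocking, so a slow or missing reader (e.g. a fifo passed
# to --stdout) can't stall the runner threads.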
class TestCase:
# create a TestCase object from a config
def __init__(self, config, args={}):
self.name = config.pop('name')
self.path = config.pop('path')
self.suite = config.pop('suite')
self.lineno = config.pop('lineno', None)
self.if_ = config.pop('if', None)
if isinstance(self.if_, bool):
self.if_ = 'true' if self.if_ else 'false'
self.code = config.pop('code')
self.code_lineno = config.pop('code_lineno', None)
self.in_ = config.pop('in',
config.pop('suite_in', None))
self.reentrant = config.pop('reentrant',
config.pop('suite_reentrant', False))
# figure out defines and build possible permutations
self.defines = set()
self.permutations = []
suite_defines = config.pop('suite_defines', {})
if not isinstance(suite_defines, list):
suite_defines = [suite_defines]
defines = config.pop('defines', {})
if not isinstance(defines, list):
defines = [defines]
# build possible permutations
for suite_defines_ in suite_defines:
self.defines |= suite_defines_.keys()
for defines_ in defines:
self.defines |= defines_.keys()
self.permutations.extend(map(dict, it.product(*(
[(k, v) for v in (vs if isinstance(vs, list) else [vs])]
for k, vs in sorted(
(suite_defines_ | defines_).items())))))
for k in config.keys():
print('%swarning:%s in %s, found unused key %r' % (
'\x1b[01;33m' if args['color'] else '',
'\x1b[m' if args['color'] else '',
self.id(),
k),
file=sys.stderr)
def id(self):
return '%s:%s' % (self.suite, self.name)
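# A small sketch of how defines expand into permutations: each key maps to a
# value or a list of values, and it.product above picks one value per key
# (hypothetical names and values, shown post-toml as Python dicts):
#
#   defines = {'N': [1, 2, 4], 'CYCLES': 10}
#   # -> permutations:
#   #   {'CYCLES': 10, 'N': 1}
#   #   {'CYCLES': 10, 'N': 2}
#   #   {'CYCLES': 10, 'N': 4}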
class TestSuite:
# create a TestSuite object from a toml file
def __init__(self, path, args={}):
self.path = path
self.name = os.path.basename(path)
if self.name.endswith('.toml'):
self.name = self.name[:-len('.toml')]
# load toml file and parse test cases
with open(self.path) as f:
# load tests
config = toml.load(f)
# find line numbers
f.seek(0)
case_linenos = []
code_linenos = []
for i, line in enumerate(f):
match = re.match(
r'(?P<case>\[\s*cases\s*\.\s*(?P<name>\w+)\s*\])'
'|' r'(?P<code>code\s*=)',
line)
if match and match.group('case'):
case_linenos.append((i+1, match.group('name')))
elif match and match.group('code'):
code_linenos.append(i+2)
# sort in case toml parsing did not retain order
case_linenos.sort()
cases = config.pop('cases')
for (lineno, name), (nlineno, _) in it.zip_longest(
case_linenos, case_linenos[1:],
fillvalue=(float('inf'), None)):
code_lineno = min(
(l for l in code_linenos if l >= lineno and l < nlineno),
default=None)
cases[name]['lineno'] = lineno
cases[name]['code_lineno'] = code_lineno
self.if_ = config.pop('if', None)
if isinstance(self.if_, bool):
self.if_ = 'true' if self.if_ else 'false'
self.code = config.pop('code', None)
self.code_lineno = min(
(l for l in code_linenos
if not case_linenos or l < case_linenos[0][0]),
default=None)
# a couple of these we just forward to all cases
defines = config.pop('defines', {})
in_ = config.pop('in', None)
reentrant = config.pop('reentrant', False)
self.cases = []
for name, case in sorted(cases.items(),
key=lambda c: c[1].get('lineno')):
self.cases.append(TestCase(config={
'name': name,
'path': path + (':%d' % case['lineno']
if 'lineno' in case else ''),
'suite': self.name,
'suite_defines': defines,
'suite_in': in_,
'suite_reentrant': reentrant,
**case},
args=args))
# combine per-case defines
self.defines = set.union(*(
set(case.defines) for case in self.cases))
# combine other per-case things
self.reentrant = any(case.reentrant for case in self.cases)
for k in config.keys():
print('%swarning:%s in %s, found unused key %r' % (
'\x1b[01;33m' if args['color'] else '',
'\x1b[m' if args['color'] else '',
self.id(),
k),
file=sys.stderr)
def id(self):
return self.name
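# A rough sketch of the .toml layout this parser expects (field names come
# from the config keys popped above; the specific values are made up):
#
#   # optional suite-level fields, combined with or forwarded to each case
#   defines.CACHE_SIZE = [64, 512]
#   if = 'CACHE_SIZE <= BLOCK_SIZE'
#
#   [cases.test_example]
#   defines.N = [1, 2]
#   reentrant = true
#   code = '''
#       // C test body, pasted into a generated __test__*__run function
#   '''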
def compile(test_paths, **args):
# find .toml files
paths = []
for path in test_paths:
if os.path.isdir(path):
path = path + '/*.toml'
for path in glob.glob(path):
paths.append(path)
if not paths:
print('no test suites found in %r?' % test_paths)
sys.exit(-1)
if not args.get('source'):
if len(paths) > 1:
print('more than one test suite for compilation? (%r)' % test_paths)
sys.exit(-1)
# load our suite
suite = TestSuite(paths[0], args)
else:
# load all suites
suites = [TestSuite(path, args) for path in paths]
suites.sort(key=lambda s: s.name)
# write generated test source
if 'output' in args:
with openio(args['output'], 'w') as f:
_write = f.write
def write(s):
f.lineno += s.count('\n')
_write(s)
def writeln(s=''):
f.lineno += s.count('\n') + 1
_write(s)
_write('\n')
f.lineno = 1
f.write = write
f.writeln = writeln
f.writeln("// Generated by %s:" % sys.argv[0])
f.writeln("//")
f.writeln("// %s" % ' '.join(sys.argv))
f.writeln("//")
f.writeln()
# include test_runner.h in every generated file
f.writeln("#include \"%s\"" % args['include'])
f.writeln()
# write out generated functions, this can end up in different
# files depending on the "in" attribute
#
# note it's up to the specific generated file to declare
# the test defines
def write_case_functions(f, suite, case):
# create case define functions
if case.defines:
# deduplicate defines by value to try to reduce the
# number of functions we generate
define_cbs = {}
for i, defines in enumerate(case.permutations):
for k, v in sorted(defines.items()):
if v not in define_cbs:
name = ('__test__%s__%s__%s__%d'
% (suite.name, case.name, k, i))
define_cbs[v] = name
f.writeln('intmax_t %s('
'__attribute__((unused)) '
'void *data) {' % name)
f.writeln(4*' '+'return %s;' % v)
f.writeln('}')
f.writeln()
f.writeln('const test_define_t *const '
'__test__%s__%s__defines[] = {'
% (suite.name, case.name))
for defines in case.permutations:
f.writeln(4*' '+'(const test_define_t['
'TEST_IMPLICIT_DEFINE_COUNT+%d]){' % (
len(suite.defines)))
for k, v in sorted(defines.items()):
f.writeln(8*' '+'[%-24s] = {%s, NULL},' % (
k+'_i', define_cbs[v]))
f.writeln(4*' '+'},')
f.writeln('};')
f.writeln()
# create case filter function
if suite.if_ is not None or case.if_ is not None:
f.writeln('bool __test__%s__%s__filter(void) {'
% (suite.name, case.name))
f.writeln(4*' '+'return %s;'
% ' && '.join('(%s)' % if_
for if_ in [suite.if_, case.if_]
if if_ is not None))
f.writeln('}')
f.writeln()
# create case run function
f.writeln('void __test__%s__%s__run('
'__attribute__((unused)) struct lfs_config *cfg) {'
% (suite.name, case.name))
f.writeln(4*' '+'// test case %s' % case.id())
if case.code_lineno is not None:
f.writeln(4*' '+'#line %d "%s"'
% (case.code_lineno, suite.path))
f.write(case.code)
if case.code_lineno is not None:
f.writeln(4*' '+'#line %d "%s"'
% (f.lineno+1, args['output']))
f.writeln('}')
f.writeln()
if not args.get('source'):
if suite.code is not None:
if suite.code_lineno is not None:
f.writeln('#line %d "%s"'
% (suite.code_lineno, suite.path))
f.write(suite.code)
if suite.code_lineno is not None:
f.writeln('#line %d "%s"'
% (f.lineno+1, args['output']))
f.writeln()
if suite.defines:
for i, define in enumerate(sorted(suite.defines)):
f.writeln('#ifndef %s' % define)
f.writeln('#define %-24s '
'TEST_IMPLICIT_DEFINE_COUNT+%d' % (define+'_i', i))
f.writeln('#define %-24s '
'test_define(%s)' % (define, define+'_i'))
f.writeln('#endif')
f.writeln()
# create case functions
for case in suite.cases:
if case.in_ is None:
write_case_functions(f, suite, case)
else:
if case.defines:
f.writeln('extern const test_define_t *const '
'__test__%s__%s__defines[];'
% (suite.name, case.name))
if suite.if_ is not None or case.if_ is not None:
f.writeln('extern bool __test__%s__%s__filter('
'void);'
% (suite.name, case.name))
f.writeln('extern void __test__%s__%s__run('
'struct lfs_config *cfg);'
% (suite.name, case.name))
f.writeln()
# create suite struct
f.writeln('__attribute__((section("_test_suites")))')
f.writeln('const struct test_suite __test__%s__suite = {'
% suite.name)
f.writeln(4*' '+'.id = "%s",' % suite.id())
f.writeln(4*' '+'.name = "%s",' % suite.name)
f.writeln(4*' '+'.path = "%s",' % suite.path)
f.writeln(4*' '+'.flags = %s,'
% (' | '.join(filter(None, [
'TEST_REENTRANT' if suite.reentrant else None]))
or 0))
if suite.defines:
# create suite define names
f.writeln(4*' '+'.define_names = (const char *const['
'TEST_IMPLICIT_DEFINE_COUNT+%d]){' % (
len(suite.defines)))
for k in sorted(suite.defines):
f.writeln(8*' '+'[%-24s] = "%s",' % (k+'_i', k))
f.writeln(4*' '+'},')
f.writeln(4*' '+'.define_count = '
'TEST_IMPLICIT_DEFINE_COUNT+%d,' % len(suite.defines))
f.writeln(4*' '+'.cases = (const struct test_case[]){')
for case in suite.cases:
# create case structs
f.writeln(8*' '+'{')
f.writeln(12*' '+'.id = "%s",' % case.id())
f.writeln(12*' '+'.name = "%s",' % case.name)
f.writeln(12*' '+'.path = "%s",' % case.path)
f.writeln(12*' '+'.flags = %s,'
% (' | '.join(filter(None, [
'TEST_REENTRANT' if case.reentrant else None]))
or 0))
f.writeln(12*' '+'.permutations = %d,'
% len(case.permutations))
if case.defines:
f.writeln(12*' '+'.defines = __test__%s__%s__defines,'
% (suite.name, case.name))
if suite.if_ is not None or case.if_ is not None:
f.writeln(12*' '+'.filter = __test__%s__%s__filter,'
% (suite.name, case.name))
f.writeln(12*' '+'.run = __test__%s__%s__run,'
% (suite.name, case.name))
f.writeln(8*' '+'},')
f.writeln(4*' '+'},')
f.writeln(4*' '+'.case_count = %d,' % len(suite.cases))
f.writeln('};')
f.writeln()
else:
# copy source
f.writeln('#line 1 "%s"' % args['source'])
with open(args['source']) as sf:
shutil.copyfileobj(sf, f)
f.writeln()
# write any internal tests
for suite in suites:
for case in suite.cases:
if (case.in_ is not None
and os.path.normpath(case.in_)
== os.path.normpath(args['source'])):
# write defines, but note we need to undef any
# new defines since we're in someone else's file
if suite.defines:
for i, define in enumerate(
sorted(suite.defines)):
f.writeln('#ifndef %s' % define)
f.writeln('#define %-24s '
'TEST_IMPLICIT_DEFINE_COUNT+%d' % (
define+'_i', i))
f.writeln('#define %-24s '
'test_define(%s)' % (
define, define+'_i'))
f.writeln('#define '
'__TEST__%s__NEEDS_UNDEF' % (
define))
f.writeln('#endif')
f.writeln()
write_case_functions(f, suite, case)
if suite.defines:
for define in sorted(suite.defines):
f.writeln('#ifdef __TEST__%s__NEEDS_UNDEF'
% define)
f.writeln('#undef __TEST__%s__NEEDS_UNDEF'
% define)
f.writeln('#undef %s' % define)
f.writeln('#undef %s' % (define+'_i'))
f.writeln('#endif')
f.writeln()
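# For reference, the C emitted above for a single case is shaped roughly like
# this (using a hypothetical suite/case named "example"/"hello" with a single
# define N):
#
#   intmax_t __test__example__hello__N__0(__attribute__((unused)) void *data) {
#       return 4;
#   }
#
#   const test_define_t *const __test__example__hello__defines[] = { ... };
#   bool __test__example__hello__filter(void) { return ...; }
#   void __test__example__hello__run(
#           __attribute__((unused)) struct lfs_config *cfg) { ... }
#
#   __attribute__((section("_test_suites")))
#   const struct test_suite __test__example__suite = { ... };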
def find_runner(runner, test_ids, **args):
cmd = runner.copy()
cmd.extend(test_ids)
# run under some external command?
cmd[:0] = args.get('exec', [])
# run under valgrind?
if args.get('valgrind'):
cmd[:0] = filter(None, [
'valgrind',
'--leak-check=full',
'--track-origins=yes',
'--error-exitcode=4',
'-q'])
# other context
if args.get('geometry'):
cmd.append('-g%s' % args['geometry'])
if args.get('powerloss'):
cmd.append('-p%s' % args['powerloss'])
if args.get('disk'):
cmd.append('-d%s' % args['disk'])
if args.get('trace'):
cmd.append('-t%s' % args['trace'])
if args.get('read_sleep'):
cmd.append('--read-sleep=%s' % args['read_sleep'])
if args.get('prog_sleep'):
cmd.append('--prog-sleep=%s' % args['prog_sleep'])
if args.get('erase_sleep'):
cmd.append('--erase-sleep=%s' % args['erase_sleep'])
# defines?
if args.get('define'):
for define in args.get('define'):
cmd.append('-D%s' % define)
return cmd
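# For example, with --valgrind, a geometry, and a define override, the
# composed command is shaped roughly like (test id and values illustrative):
#
#   valgrind --leak-check=full --track-origins=yes --error-exitcode=4 -q \
#       runners/test_runner example:case -gd,e -DN=4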
def list_(runner, test_ids, **args):
cmd = find_runner(runner, test_ids, **args)
if args.get('summary'): cmd.append('--summary')
if args.get('list_suites'): cmd.append('--list-suites')
if args.get('list_cases'): cmd.append('--list-cases')
if args.get('list_suite_paths'): cmd.append('--list-suite-paths')
if args.get('list_case_paths'): cmd.append('--list-case-paths')
if args.get('list_defines'): cmd.append('--list-defines')
if args.get('list_permutation_defines'):
cmd.append('--list-permutation-defines')
if args.get('list_implicit_defines'):
cmd.append('--list-implicit-defines')
if args.get('list_geometries'): cmd.append('--list-geometries')
if args.get('list_powerlosses'): cmd.append('--list-powerlosses')
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
return sp.call(cmd)
def find_cases(runner_, **args):
# query from runner
cmd = runner_ + ['--list-cases']
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace',
close_fds=False)
expected_suite_perms = co.defaultdict(lambda: 0)
expected_case_perms = co.defaultdict(lambda: 0)
expected_perms = 0
total_perms = 0
pattern = re.compile(
r'^(?P<id>(?P<case>(?P<suite>[^:]+):[^\s:]+)[^\s]*)\s+'
r'[^\s]+\s+(?P<filtered>\d+)/(?P<perms>\d+)')
# skip the first line
for line in it.islice(proc.stdout, 1, None):
m = pattern.match(line)
if m:
filtered = int(m.group('filtered'))
perms = int(m.group('perms'))
expected_suite_perms[m.group('suite')] += filtered
expected_case_perms[m.group('id')] += filtered
expected_perms += filtered
total_perms += perms
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
sys.exit(-1)
return (
expected_suite_perms,
expected_case_perms,
expected_perms,
total_perms)
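# Note this assumes each non-header line of --list-cases output is shaped
# roughly like the following; only the id and the filtered/perms counts are
# parsed, the middle column is ignored:
#
#   example:test_something  <flags>  4/6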
def find_path(runner_, id, **args):
# query from runner
cmd = runner_ + ['--list-case-paths', id]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace',
close_fds=False)
path = None
pattern = re.compile(
r'^(?P<id>(?P<case>(?P<suite>[^:]+):[^\s:]+)[^\s]*)\s+'
r'(?P<path>[^:]+):(?P<lineno>\d+)')
# skip the first line
for line in it.islice(proc.stdout, 1, None):
m = pattern.match(line)
if m and path is None:
path_ = m.group('path')
lineno = int(m.group('lineno'))
path = (path_, lineno)
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
sys.exit(-1)
return path
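# Similarly, find_path assumes non-header --list-case-paths lines shaped
# roughly like "example:test_something path/to/suite.toml:42", keeping only
# the first match's path and line number.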
def find_defines(runner_, id, **args):
# query permutation defines from runner
cmd = runner_ + ['--list-permutation-defines', id]
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.PIPE if not args.get('verbose') else None,
universal_newlines=True,
errors='replace',
close_fds=False)
defines = co.OrderedDict()
pattern = re.compile(r'^(?P<define>\w+)=(?P<value>.+)')
for line in proc.stdout:
m = pattern.match(line)
if m:
define = m.group('define')
value = m.group('value')
defines[define] = value
proc.wait()
if proc.returncode != 0:
if not args.get('verbose'):
for line in proc.stderr:
sys.stdout.write(line)
sys.exit(-1)
return defines
class TestFailure(Exception):
def __init__(self, id, returncode, stdout, assert_=None):
self.id = id
self.returncode = returncode
self.stdout = stdout
self.assert_ = assert_
def run_stage(name, runner_, **args):
# get expected suite/case/perm counts
expected_suite_perms, expected_case_perms, expected_perms, total_perms = (
find_cases(runner_, **args))
passed_suite_perms = co.defaultdict(lambda: 0)
passed_case_perms = co.defaultdict(lambda: 0)
passed_perms = 0
powerlosses = 0
failures = []
killed = False
pattern = re.compile('^(?:'
'(?P<op>running|finished|skipped|powerloss) '
r'(?P<id>(?P<case>(?P<suite>[^:]+):[^\s:]+)[^\s]*)'
'|' r'(?P<path>[^:]+):(?P<lineno>\d+):(?P<op_>assert):'
' *(?P<message>.*)' ')$')
locals = th.local()
children = set()
def run_runner(runner_):
nonlocal passed_suite_perms
nonlocal passed_case_perms
nonlocal passed_perms
nonlocal powerlosses
nonlocal locals
# run the tests!
cmd = runner_.copy()
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
mpty, spty = pty.openpty()
proc = sp.Popen(cmd, stdout=spty, stderr=spty, close_fds=False)
os.close(spty)
children.add(proc)
mpty = os.fdopen(mpty, 'r', 1)
stdout = None
last_id = None
last_stdout = []
last_assert = None
try:
while True:
# parse a line for state changes
try:
line = mpty.readline()
except OSError as e:
if e.errno == errno.EIO:
break
raise
if not line:
break
last_stdout.append(line)
if args.get('stdout'):
try:
if not stdout:
stdout = openio(args['stdout'], 'a', 1, nb=True)
stdout.write(line)
except OSError as e:
if e.errno not in [
errno.ENXIO,
errno.EPIPE,
errno.EAGAIN]:
raise
stdout = None
if args.get('verbose'):
sys.stdout.write(line)
m = pattern.match(line)
if m:
op = m.group('op') or m.group('op_')
if op == 'running':
locals.seen_perms += 1
last_id = m.group('id')
last_stdout = []
last_assert = None
elif op == 'powerloss':
last_id = m.group('id')
powerlosses += 1
elif op == 'finished':
passed_suite_perms[m.group('suite')] += 1
passed_case_perms[m.group('case')] += 1
passed_perms += 1
elif op == 'skipped':
locals.seen_perms += 1
elif op == 'assert':
last_assert = (
m.group('path'),
int(m.group('lineno')),
m.group('message'))
# go ahead and kill the process, aborting takes a while
if args.get('keep_going'):
proc.kill()
except KeyboardInterrupt:
raise TestFailure(last_id, 1, last_stdout)
finally:
children.remove(proc)
mpty.close()
proc.wait()
if proc.returncode != 0:
raise TestFailure(
last_id,
proc.returncode,
last_stdout,
last_assert)
def run_job(runner, start=None, step=None):
nonlocal failures
nonlocal killed
nonlocal locals
start = start or 0
step = step or 1
while start < total_perms:
runner_ = runner.copy()
if args.get('isolate') or args.get('valgrind'):
runner_.append('-s%s,%s,%s' % (start, start+step, step))
else:
runner_.append('-s%s,,%s' % (start, step))
try:
# run the tests
locals.seen_perms = 0
run_runner(runner_)
assert locals.seen_perms > 0
start += locals.seen_perms*step
except TestFailure as failure:
# race condition for multiple failures?
if failures and not args.get('keep_going'):
break
failures.append(failure)
if args.get('keep_going') and not killed:
# resume after failed test
assert locals.seen_perms > 0
start += locals.seen_perms*step
continue
else:
# stop other tests
killed = True
for child in children.copy():
child.kill()
break
# parallel jobs?
runners = []
if 'jobs' in args:
for job in range(args['jobs']):
runners.append(th.Thread(
target=run_job, args=(runner_, job, args['jobs']),
daemon=True))
else:
runners.append(th.Thread(
target=run_job, args=(runner_, None, None),
daemon=True))
def print_update(done):
if not args.get('verbose') and (args['color'] or done):
sys.stdout.write('%s%srunning %s%s:%s %s%s' % (
'\r\x1b[K' if args['color'] else '',
'\x1b[?7l' if not done else '',
('\x1b[32m' if not failures else '\x1b[31m')
if args['color'] else '',
name,
'\x1b[m' if args['color'] else '',
', '.join(filter(None, [
'%d/%d suites' % (
sum(passed_suite_perms[k] == v
for k, v in expected_suite_perms.items()),
len(expected_suite_perms))
if (not args.get('by_suites')
and not args.get('by_cases')) else None,
'%d/%d cases' % (
sum(passed_case_perms[k] == v
for k, v in expected_case_perms.items()),
len(expected_case_perms))
if not args.get('by_cases') else None,
'%d/%d perms' % (passed_perms, expected_perms),
'%dpls!' % powerlosses
if powerlosses else None,
'%s%d/%d failures%s' % (
'\x1b[31m' if args['color'] else '',
len(failures),
expected_perms,
'\x1b[m' if args['color'] else '')
if failures else None])),
'\x1b[?7h' if not done else '\n'))
sys.stdout.flush()
for r in runners:
r.start()
try:
while any(r.is_alive() for r in runners):
time.sleep(0.01)
print_update(False)
except KeyboardInterrupt:
# this is handled by the runner threads, we just
# need to not abort here
killed = True
finally:
print_update(True)
for r in runners:
r.join()
return (
expected_perms,
passed_perms,
powerlosses,
failures,
killed)
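# A quick sketch of how parallel jobs divide the permutation space: each job
# steps through permutations with a stride of --jobs, resuming from wherever
# the previous runner invocation stopped. With --jobs=3:
#
#   job 0: -s0,,3  -> perms 0, 3, 6, ...
#   job 1: -s1,,3  -> perms 1, 4, 7, ...
#   job 2: -s2,,3  -> perms 2, 5, 8, ...
#
# With --isolate or --valgrind each invocation is additionally bounded to a
# single permutation (-s<start>,<start+step>,<step>).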
def run(runner, test_ids, **args):
# query runner for tests
runner_ = find_runner(runner, test_ids, **args)
print('using runner: %s'
% ' '.join(shlex.quote(c) for c in runner_))
expected_suite_perms, expected_case_perms, expected_perms, total_perms = (
find_cases(runner_, **args))
print('found %d suites, %d cases, %d/%d permutations'
% (len(expected_suite_perms),
len(expected_case_perms),
expected_perms,
total_perms))
print()
# truncate and open logs here so they aren't disconnected between tests
stdout = None
if args.get('stdout'):
stdout = openio(args['stdout'], 'w', 1)
trace = None
if args.get('trace'):
trace = openio(args['trace'], 'w', 1)
# measure runtime
start = time.time()
# spawn runners
expected = 0
passed = 0
powerlosses = 0
failures = []
for by in (expected_case_perms.keys() if args.get('by_cases')
else expected_suite_perms.keys() if args.get('by_suites')
else [None]):
# rebuild runner for each stage to override test identifier if needed
stage_runner = find_runner(runner,
[by] if by is not None else test_ids, **args)
# spawn jobs for stage
expected_, passed_, powerlosses_, failures_, killed = run_stage(
by or 'tests',
stage_runner,
**args)
expected += expected_
passed += passed_
powerlosses += powerlosses_
failures.extend(failures_)
if (failures and not args.get('keep_going')) or killed:
break
stop = time.time()
if stdout:
stdout.close()
if trace:
trace.close()
# show summary
print()
print('%sdone:%s %s' % (
('\x1b[32m' if not failures else '\x1b[31m')
if args['color'] else '',
'\x1b[m' if args['color'] else '',
', '.join(filter(None, [
'%d/%d passed' % (passed, expected),
'%d/%d failed' % (len(failures), expected),
'%dpls!' % powerlosses if powerlosses else None,
'in %.2fs' % (stop-start)]))))
print()
# print each failure
for failure in failures:
assert failure.id is not None, '%s broken? %r' % (
' '.join(shlex.quote(c) for c in runner_),
failure)
# get some extra info from runner
path, lineno = find_path(runner_, failure.id, **args)
defines = find_defines(runner_, failure.id, **args)
# show summary of failure
print('%s%s:%d:%sfailure:%s %s%s failed' % (
'\x1b[01m' if args['color'] else '',
path, lineno,
'\x1b[01;31m' if args['color'] else '',
'\x1b[m' if args['color'] else '',
failure.id,
' (%s)' % ', '.join('%s=%s' % (k,v) for k,v in defines.items())
if defines else ''))
if failure.stdout:
stdout = failure.stdout
if failure.assert_ is not None:
stdout = stdout[:-1]
for line in stdout[-args.get('context', 5):]:
sys.stdout.write(line)
if failure.assert_ is not None:
path, lineno, message = failure.assert_
print('%s%s:%d:%sassert:%s %s' % (
'\x1b[01m' if args['color'] else '',
path, lineno,
'\x1b[01;31m' if args['color'] else '',
'\x1b[m' if args['color'] else '',
message))
with open(path) as f:
line = next(it.islice(f, lineno-1, None)).strip('\n')
print(line)
print()
# drop into gdb?
if failures and (args.get('gdb')
or args.get('gdb_case')
or args.get('gdb_main')):
failure = failures[0]
runner_ = find_runner(runner, [failure.id], **args)
if args.get('gdb_main'):
cmd = ['gdb',
'-ex', 'break main',
'-ex', 'run',
'--args'] + runner_
elif args.get('gdb_case'):
path, lineno = find_path(runner_, failure.id, **args)
cmd = ['gdb',
'-ex', 'break %s:%d' % (path, lineno),
'-ex', 'run',
'--args'] + runner_
elif failure.assert_ is not None:
cmd = ['gdb',
'-ex', 'run',
'-ex', 'frame function raise',
'-ex', 'up 2',
'--args'] + runner_
else:
cmd = ['gdb',
'-ex', 'run',
'--args'] + runner_
# exec gdb interactively
if args.get('verbose'):
print(' '.join(shlex.quote(c) for c in cmd))
os.execvp(cmd[0], cmd)
return 1 if failures else 0
def main(**args):
# figure out what color should be
if args.get('color') == 'auto':
args['color'] = sys.stdout.isatty()
elif args.get('color') == 'always':
args['color'] = True
else:
args['color'] = False
if args.get('compile'):
return compile(**args)
elif (args.get('summary')
or args.get('list_suites')
or args.get('list_cases')
or args.get('list_suite_paths')
or args.get('list_case_paths')
or args.get('list_defines')
or args.get('list_permutation_defines')
or args.get('list_implicit_defines')
or args.get('list_geometries')
or args.get('list_powerlosses')):
return list_(**args)
else:
return run(**args)
if __name__ == "__main__":
import argparse
import sys
argparse.ArgumentParser._handle_conflict_ignore = lambda *_: None
argparse._ArgumentGroup._handle_conflict_ignore = lambda *_: None
parser = argparse.ArgumentParser(
description="Build and run tests.",
conflict_handler='ignore')
parser.add_argument('-v', '--verbose', action='store_true',
help="Output commands that run behind the scenes.")
parser.add_argument('--color',
choices=['never', 'always', 'auto'], default='auto',
help="When to use terminal colors.")
# test flags
test_parser = parser.add_argument_group('test options')
test_parser.add_argument('runner', nargs='?',
type=lambda x: x.split(),
help="Test runner to use for testing. Defaults to %r." % RUNNER_PATH)
test_parser.add_argument('test_ids', nargs='*',
help="Description of tests to run.")
test_parser.add_argument('-Y', '--summary', action='store_true',
help="Show quick summary.")
test_parser.add_argument('-l', '--list-suites', action='store_true',
help="List test suites.")
test_parser.add_argument('-L', '--list-cases', action='store_true',
help="List test cases.")
test_parser.add_argument('--list-suite-paths', action='store_true',
help="List the path for each test suite.")
test_parser.add_argument('--list-case-paths', action='store_true',
help="List the path and line number for each test case.")
test_parser.add_argument('--list-defines', action='store_true',
help="List all defines in this test-runner.")
test_parser.add_argument('--list-permutation-defines', action='store_true',
help="List explicit defines in this test-runner.")
test_parser.add_argument('--list-implicit-defines', action='store_true',
help="List implicit defines in this test-runner.")
test_parser.add_argument('--list-geometries', action='store_true',
help="List the available disk geometries.")
test_parser.add_argument('--list-powerlosses', action='store_true',
help="List the available power-loss scenarios.")
test_parser.add_argument('-D', '--define', action='append',
help="Override a test define.")
test_parser.add_argument('-g', '--geometry',
help="Comma-separated list of disk geometries to test. \
Defaults to d,e,E,n,N.")
test_parser.add_argument('-p', '--powerloss',
help="Comma-separated list of power-loss scenarios to test. \
Defaults to 0,l.")
test_parser.add_argument('-d', '--disk',
help="Direct block device operations to this file.")
test_parser.add_argument('-t', '--trace',
help="Direct trace output to this file.")
test_parser.add_argument('-O', '--stdout',
help="Direct stdout to this file. Note stderr is already merged here.")
test_parser.add_argument('--read-sleep',
help="Artificial read delay in seconds.")
test_parser.add_argument('--prog-sleep',
help="Artificial prog delay in seconds.")
test_parser.add_argument('--erase-sleep',
help="Artificial erase delay in seconds.")
test_parser.add_argument('-j', '--jobs', nargs='?', type=int,
const=len(os.sched_getaffinity(0)),
help="Number of parallel runners to run.")
test_parser.add_argument('-k', '--keep-going', action='store_true',
help="Don't stop on first error.")
test_parser.add_argument('-i', '--isolate', action='store_true',
help="Run each test permutation in a separate process.")
test_parser.add_argument('-b', '--by-suites', action='store_true',
help="Step through tests by suite.")
test_parser.add_argument('-B', '--by-cases', action='store_true',
help="Step through tests by case.")
test_parser.add_argument('--context', type=lambda x: int(x, 0),
help="Show this many lines of stdout on test failure. \
Defaults to 5.")
test_parser.add_argument('--gdb', action='store_true',
help="Drop into gdb on test failure.")
test_parser.add_argument('--gdb-case', action='store_true',
help="Drop into gdb on test failure but stop at the beginning \
of the failing test case.")
test_parser.add_argument('--gdb-main', action='store_true',
help="Drop into gdb on test failure but stop at the beginning \
of main.")
test_parser.add_argument('--exec', default=[], type=lambda e: e.split(),
help="Run under another executable.")
test_parser.add_argument('--valgrind', action='store_true',
help="Run under Valgrind to find memory errors. Implicitly sets \
--isolate.")
# compilation flags
comp_parser = parser.add_argument_group('compilation options')
comp_parser.add_argument('test_paths', nargs='*',
help="Description of *.toml files to compile. May be a directory \
or a list of paths.")
comp_parser.add_argument('-c', '--compile', action='store_true',
help="Compile a test suite or source file.")
comp_parser.add_argument('-s', '--source',
help="Source file to compile, possibly injecting internal tests.")
comp_parser.add_argument('--include', default=HEADER_PATH,
help="Inject this header file into every compiled test file. \
Defaults to %r." % HEADER_PATH)
comp_parser.add_argument('-o', '--output',
help="Output file.")
# runner + test_ids overlaps test_paths, so we need to do some munging here
args = parser.parse_args()
args.test_paths = [' '.join(args.runner or [])] + args.test_ids
args.runner = args.runner or [RUNNER_PATH]
sys.exit(main(**{k: v
for k, v in vars(args).items()
if v is not None}))