Mirror of https://github.com/micropython/micropython.git (synced 2026-01-06 12:10:13 +01:00).
tests: Format all Python code with black, except tests in basics subdir.
This adds the Python files in the tests/ directory to be formatted with ./tools/codeformat.py. The basics/ subdirectory is excluded for now so we aren't changing too much at once. In a few places `# fmt: off`/`# fmt: on` was used where the code had special formatting for readability or where the test was actually testing the specific formatting.
Commit 3dc324d3f1 (parent 488613bca6), committed by Damien George.
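As background for the commit message above: `# fmt: off` / `# fmt: on` are standard black directives that suspend formatting for the lines between them. The following is a minimal sketch (a hypothetical snippet, not taken from this commit) of how a test can keep deliberate, hand-aligned layout while the rest of the file is black-formatted:

# fmt: off
# Deliberate alignment that black would otherwise rewrap; the directives
# keep this region exactly as written.
TABLE = [
    (1,   "one"),
    (10,  "ten"),
    (100, "hundred"),
]
# fmt: on

print(len(TABLE))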
@@ -10,20 +10,21 @@ import sys
 import argparse
 from glob import glob
 
-sys.path.append('../tools')
+sys.path.append("../tools")
 import pyboard
 
 # Paths for host executables
-if os.name == 'nt':
-    CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3.exe')
-    MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../ports/windows/micropython.exe')
+if os.name == "nt":
+    CPYTHON3 = os.getenv("MICROPY_CPYTHON3", "python3.exe")
+    MICROPYTHON = os.getenv("MICROPY_MICROPYTHON", "../ports/windows/micropython.exe")
 else:
-    CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3')
-    MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../ports/unix/micropython')
+    CPYTHON3 = os.getenv("MICROPY_CPYTHON3", "python3")
+    MICROPYTHON = os.getenv("MICROPY_MICROPYTHON", "../ports/unix/micropython")
 
 PYTHON_TRUTH = CPYTHON3
 
-BENCH_SCRIPT_DIR = 'perf_bench/'
+BENCH_SCRIPT_DIR = "perf_bench/"
+
 
 def compute_stats(lst):
     avg = 0
@@ -35,8 +36,9 @@ def compute_stats(lst):
     var = max(0, var / len(lst) - avg ** 2)
     return avg, var ** 0.5
 
+
 def run_script_on_target(target, script):
-    output = b''
+    output = b""
     err = None
 
     if isinstance(target, pyboard.Pyboard):
@@ -49,21 +51,25 @@ def run_script_on_target(target, script):
     else:
         # Run local executable
         try:
-            p = subprocess.run(target, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, input=script)
+            p = subprocess.run(
+                target, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, input=script
+            )
             output = p.stdout
         except subprocess.CalledProcessError as er:
             err = er
 
-    return str(output.strip(), 'ascii'), err
+    return str(output.strip(), "ascii"), err
+
 
 def run_feature_test(target, test):
-    with open('feature_check/' + test + '.py', 'rb') as f:
+    with open("feature_check/" + test + ".py", "rb") as f:
         script = f.read()
     output, err = run_script_on_target(target, script)
     if err is None:
         return output
     else:
-        return 'CRASH: %r' % err
+        return "CRASH: %r" % err
+
 
 def run_benchmark_on_target(target, script):
     output, err = run_script_on_target(target, script)
@@ -72,36 +78,39 @@ def run_benchmark_on_target(target, script):
         try:
             return int(time), int(norm), result
         except ValueError:
-            return -1, -1, 'CRASH: %r' % output
+            return -1, -1, "CRASH: %r" % output
     else:
-        return -1, -1, 'CRASH: %r' % err
+        return -1, -1, "CRASH: %r" % err
+
 
 def run_benchmarks(target, param_n, param_m, n_average, test_list):
-    skip_complex = run_feature_test(target, 'complex') != 'complex'
-    skip_native = run_feature_test(target, 'native_check') != ''
+    skip_complex = run_feature_test(target, "complex") != "complex"
+    skip_native = run_feature_test(target, "native_check") != ""
 
     for test_file in sorted(test_list):
-        print(test_file + ': ', end='')
+        print(test_file + ": ", end="")
 
         # Check if test should be skipped
         skip = (
-            skip_complex and test_file.find('bm_fft') != -1
-            or skip_native and test_file.find('viper_') != -1
+            skip_complex
+            and test_file.find("bm_fft") != -1
+            or skip_native
+            and test_file.find("viper_") != -1
         )
         if skip:
-            print('skip')
+            print("skip")
             continue
 
         # Create test script
-        with open(test_file, 'rb') as f:
+        with open(test_file, "rb") as f:
             test_script = f.read()
-        with open(BENCH_SCRIPT_DIR + 'benchrun.py', 'rb') as f:
+        with open(BENCH_SCRIPT_DIR + "benchrun.py", "rb") as f:
             test_script += f.read()
-        test_script += b'bm_run(%u, %u)\n' % (param_n, param_m)
+        test_script += b"bm_run(%u, %u)\n" % (param_n, param_m)
 
         # Write full test script if needed
         if 0:
-            with open('%s.full' % test_file, 'wb') as f:
+            with open("%s.full" % test_file, "wb") as f:
                 f.write(test_script)
 
         # Run MicroPython a given number of times
@@ -117,43 +126,49 @@ def run_benchmarks(target, param_n, param_m, n_average, test_list):
             if result_out is None:
                 result_out = result
             elif result != result_out:
-                error = 'FAIL self'
+                error = "FAIL self"
                 break
             times.append(time)
             scores.append(1e6 * norm / time)
 
         # Check result against truth if needed
-        if error is None and result_out != 'None':
+        if error is None and result_out != "None":
             _, _, result_exp = run_benchmark_on_target(PYTHON_TRUTH, test_script)
             if result_out != result_exp:
-                error = 'FAIL truth'
+                error = "FAIL truth"
 
         if error is not None:
             print(error)
         else:
             t_avg, t_sd = compute_stats(times)
             s_avg, s_sd = compute_stats(scores)
-            print('{:.2f} {:.4f} {:.2f} {:.4f}'.format(t_avg, 100 * t_sd / t_avg, s_avg, 100 * s_sd / s_avg))
+            print(
+                "{:.2f} {:.4f} {:.2f} {:.4f}".format(
+                    t_avg, 100 * t_sd / t_avg, s_avg, 100 * s_sd / s_avg
+                )
+            )
             if 0:
-                print(' times: ', times)
-                print(' scores:', scores)
+                print(" times: ", times)
+                print(" scores:", scores)
 
         sys.stdout.flush()
 
+
 def parse_output(filename):
     with open(filename) as f:
         params = f.readline()
         n, m, _ = params.strip().split()
-        n = int(n.split('=')[1])
-        m = int(m.split('=')[1])
+        n = int(n.split("=")[1])
+        m = int(m.split("=")[1])
         data = []
         for l in f:
-            if l.find(': ') != -1 and l.find(': skip') == -1 and l.find('CRASH: ') == -1:
-                name, values = l.strip().split(': ')
+            if l.find(": ") != -1 and l.find(": skip") == -1 and l.find("CRASH: ") == -1:
+                name, values = l.strip().split(": ")
                 values = tuple(float(v) for v in values.split())
                 data.append((name,) + values)
     return n, m, data
 
+
 def compute_diff(file1, file2, diff_score):
     # Parse output data from previous runs
     n1, m1, d1 = parse_output(file1)
@@ -161,14 +176,18 @@ def compute_diff(file1, file2, diff_score):
 
     # Print header
     if diff_score:
-        print('diff of scores (higher is better)')
+        print("diff of scores (higher is better)")
     else:
-        print('diff of microsecond times (lower is better)')
+        print("diff of microsecond times (lower is better)")
     if n1 == n2 and m1 == m2:
-        hdr = 'N={} M={}'.format(n1, m1)
+        hdr = "N={} M={}".format(n1, m1)
     else:
-        hdr = 'N={} M={} vs N={} M={}'.format(n1, m1, n2, m2)
-    print('{:24} {:>10} -> {:>10} {:>10} {:>7}% (error%)'.format(hdr, file1, file2, 'diff', 'diff'))
+        hdr = "N={} M={} vs N={} M={}".format(n1, m1, n2, m2)
+    print(
+        "{:24} {:>10} -> {:>10} {:>10} {:>7}% (error%)".format(
+            hdr, file1, file2, "diff", "diff"
        )
    )
 
     # Print entries
     while d1 and d2:
@@ -176,32 +195,47 @@ def compute_diff(file1, file2, diff_score):
             # Found entries with matching names
             entry1 = d1.pop(0)
             entry2 = d2.pop(0)
-            name = entry1[0].rsplit('/')[-1]
+            name = entry1[0].rsplit("/")[-1]
             av1, sd1 = entry1[1 + 2 * diff_score], entry1[2 + 2 * diff_score]
             av2, sd2 = entry2[1 + 2 * diff_score], entry2[2 + 2 * diff_score]
-            sd1 *= av1 / 100 # convert from percent sd to absolute sd
-            sd2 *= av2 / 100 # convert from percent sd to absolute sd
+            sd1 *= av1 / 100  # convert from percent sd to absolute sd
+            sd2 *= av2 / 100  # convert from percent sd to absolute sd
             av_diff = av2 - av1
             sd_diff = (sd1 ** 2 + sd2 ** 2) ** 0.5
             percent = 100 * av_diff / av1
             percent_sd = 100 * sd_diff / av1
-            print('{:24} {:10.2f} -> {:10.2f} : {:+10.2f} = {:+7.3f}% (+/-{:.2f}%)'.format(name, av1, av2, av_diff, percent, percent_sd))
+            print(
+                "{:24} {:10.2f} -> {:10.2f} : {:+10.2f} = {:+7.3f}% (+/-{:.2f}%)".format(
+                    name, av1, av2, av_diff, percent, percent_sd
+                )
+            )
         elif d1[0][0] < d2[0][0]:
             d1.pop(0)
         else:
             d2.pop(0)
 
+
 def main():
-    cmd_parser = argparse.ArgumentParser(description='Run benchmarks for MicroPython')
-    cmd_parser.add_argument('-t', '--diff-time', action='store_true', help='diff time outputs from a previous run')
-    cmd_parser.add_argument('-s', '--diff-score', action='store_true', help='diff score outputs from a previous run')
-    cmd_parser.add_argument('-p', '--pyboard', action='store_true', help='run tests via pyboard.py')
-    cmd_parser.add_argument('-d', '--device', default='/dev/ttyACM0', help='the device for pyboard.py')
-    cmd_parser.add_argument('-a', '--average', default='8', help='averaging number')
-    cmd_parser.add_argument('--emit', default='bytecode', help='MicroPython emitter to use (bytecode or native)')
-    cmd_parser.add_argument('N', nargs=1, help='N parameter (approximate target CPU frequency)')
-    cmd_parser.add_argument('M', nargs=1, help='M parameter (approximate target heap in kbytes)')
-    cmd_parser.add_argument('files', nargs='*', help='input test files')
+    cmd_parser = argparse.ArgumentParser(description="Run benchmarks for MicroPython")
+    cmd_parser.add_argument(
+        "-t", "--diff-time", action="store_true", help="diff time outputs from a previous run"
+    )
+    cmd_parser.add_argument(
+        "-s", "--diff-score", action="store_true", help="diff score outputs from a previous run"
+    )
+    cmd_parser.add_argument(
+        "-p", "--pyboard", action="store_true", help="run tests via pyboard.py"
+    )
+    cmd_parser.add_argument(
+        "-d", "--device", default="/dev/ttyACM0", help="the device for pyboard.py"
+    )
+    cmd_parser.add_argument("-a", "--average", default="8", help="averaging number")
+    cmd_parser.add_argument(
+        "--emit", default="bytecode", help="MicroPython emitter to use (bytecode or native)"
+    )
+    cmd_parser.add_argument("N", nargs=1, help="N parameter (approximate target CPU frequency)")
+    cmd_parser.add_argument("M", nargs=1, help="M parameter (approximate target heap in kbytes)")
+    cmd_parser.add_argument("files", nargs="*", help="input test files")
     args = cmd_parser.parse_args()
 
     if args.diff_time or args.diff_score:
@@ -219,21 +253,22 @@ def main():
         target = pyboard.Pyboard(args.device)
         target.enter_raw_repl()
     else:
-        target = [MICROPYTHON, '-X', 'emit=' + args.emit]
+        target = [MICROPYTHON, "-X", "emit=" + args.emit]
 
     if len(args.files) == 0:
-        tests_skip = ('benchrun.py',)
+        tests_skip = ("benchrun.py",)
         if M <= 25:
             # These scripts are too big to be compiled by the target
-            tests_skip += ('bm_chaos.py', 'bm_hexiom.py', 'misc_raytrace.py')
+            tests_skip += ("bm_chaos.py", "bm_hexiom.py", "misc_raytrace.py")
         tests = sorted(
-            BENCH_SCRIPT_DIR + test_file for test_file in os.listdir(BENCH_SCRIPT_DIR)
-            if test_file.endswith('.py') and test_file not in tests_skip
+            BENCH_SCRIPT_DIR + test_file
+            for test_file in os.listdir(BENCH_SCRIPT_DIR)
+            if test_file.endswith(".py") and test_file not in tests_skip
         )
     else:
         tests = sorted(args.files)
 
-    print('N={} M={} n_average={}'.format(N, M, n_average))
+    print("N={} M={} n_average={}".format(N, M, n_average))
 
     run_benchmarks(target, N, M, n_average, tests)
@@ -241,5 +276,6 @@ def main():
         target.exit_raw_repl()
         target.close()
 
+
 if __name__ == "__main__":
     main()