From b4682d16fb358e574fc536164fda7aaba5e6b030 Mon Sep 17 00:00:00 2001
From: JanLJL
Date: Thu, 27 Feb 2020 13:30:58 +0100
Subject: [PATCH] added the ability to use a different output stream for
 running osaca and more tests

---
 osaca/db_interface.py      | 119 +++++++++++++++++--------------------
 osaca/osaca.py             |  44 ++++++++------
 tests/all_tests.py         |   2 +-
 tests/test_cli.py          | 113 +++++++++++++++++++++++++++++++++++
 tests/test_db_interface.py |  17 +++---
 tests/test_examples.py     |  55 -----------------
 6 files changed, 203 insertions(+), 147 deletions(-)
 create mode 100755 tests/test_cli.py
 delete mode 100755 tests/test_examples.py

diff --git a/osaca/db_interface.py b/osaca/db_interface.py
index 87e6459..a72ca7e 100755
--- a/osaca/db_interface.py
+++ b/osaca/db_interface.py
@@ -10,7 +10,7 @@ import ruamel.yaml
 from osaca.semantics import MachineModel
 
 
-def sanity_check(arch: str, verbose=False):
+def sanity_check(arch: str, verbose=False, output_file=sys.stdout):
     """
     Checks the database for missing TP/LT values, instructions might missing int the ISA DB and
     duplicate instructions.
@@ -39,7 +39,7 @@ def sanity_check(arch: str, verbose=False):
     # check ISA DB entries
     duplicate_instr_isa, only_in_isa = _check_sanity_isa_db(arch_mm, isa_mm)
 
-    _print_sanity_report(
+    report = _get_sanity_report(
         num_of_instr,
         missing_throughput,
         missing_latency,
@@ -49,10 +49,12 @@
         duplicate_instr_isa,
         only_in_isa,
         verbose=verbose,
+        colors=True if output_file == sys.stdout else False,
     )
+    print(report, file=output_file)
 
 
-def import_benchmark_output(arch, bench_type, filepath, output=None):
+def import_benchmark_output(arch, bench_type, filepath, output=sys.stdout):
     """
     Import benchmark results from micro-benchmarks.
 
@@ -62,8 +64,8 @@
     :type bench_type: str
     :param filepath: filepath to the output file
     :type filepath: str
-    :param output: output filepath to dump, defaults to None
-    :type output: str
+    :param output: output stream to dump, defaults to sys.stdout
+    :type output: stream
     """
     supported_bench_outputs = ['ibench', 'asmbench']
     assert os.path.exists(filepath)
@@ -83,8 +85,7 @@
     if output is None:
         print(mm.dump())
     else:
-        with open(output, 'w') as f:
-            mm.dump(stream=f)
+        mm.dump(stream=output)
 
 
 ##################
@@ -335,88 +336,80 @@ def _check_sanity_isa_db(arch_mm, isa_mm):
     return duplicate_instr_isa, only_in_isa
 
 
-def _print_sanity_report(
-    total, m_tp, m_l, m_pp, suspic_instr, dup_arch, dup_isa, only_isa, verbose=False
+def _get_sanity_report(
+    total, m_tp, m_l, m_pp, suspic_instr, dup_arch, dup_isa, only_isa, verbose=False, colors=False
 ):
+    s = ''
     # non-verbose summary
-    print('SUMMARY\n----------------------')
-    print(
-        '{}% ({}/{}) of instruction forms have no throughput value.'.format(
-            round(100 * len(m_tp) / total), len(m_tp), total
-        )
+    s += 'SUMMARY\n----------------------\n'
+    s += '{}% ({}/{}) of instruction forms have no throughput value.\n'.format(
+        round(100 * len(m_tp) / total), len(m_tp), total
     )
-    print(
-        '{}% ({}/{}) of instruction forms have no latency value.'.format(
-            round(100 * len(m_l) / total), len(m_l), total
-        )
+    s += '{}% ({}/{}) of instruction forms have no latency value.\n'.format(
+        round(100 * len(m_l) / total), len(m_l), total
     )
-    print(
-        '{}% ({}/{}) of instruction forms have no port pressure assignment.'.format(
-            round(100 * len(m_pp) / total), len(m_pp), total
-        )
+    s += '{}% ({}/{}) of instruction forms have no port pressure assignment.\n'.format(
+        round(100 * len(m_pp) / total), len(m_pp), total
     )
-    print(
-        '{}% ({}/{}) of instruction forms might miss an ISA DB entry.'.format(
-            round(100 * len(suspic_instr) / total), len(suspic_instr), total
-        )
+    s += '{}% ({}/{}) of instruction forms might miss an ISA DB entry.\n'.format(
+        round(100 * len(suspic_instr) / total), len(suspic_instr), total
     )
-    print('{} duplicate instruction forms in uarch DB.'.format(len(dup_arch)))
-    print('{} duplicate instruction forms in ISA DB.'.format(len(dup_isa)))
-    print(
+    s += '{} duplicate instruction forms in uarch DB.\n'.format(len(dup_arch))
+    s += '{} duplicate instruction forms in ISA DB.\n'.format(len(dup_isa))
+    s += (
         '{} instruction forms in ISA DB are not referenced by instruction '.format(len(only_isa))
-        + 'forms in uarch DB.'
+        + 'forms in uarch DB.\n'
     )
-    print('----------------------\n')
+    s += '----------------------\n'
     # verbose version
     if verbose:
-        _print_sanity_report_verbose(
-            total, m_tp, m_l, m_pp, suspic_instr, dup_arch, dup_isa, only_isa
+        s += _get_sanity_report_verbose(
+            total, m_tp, m_l, m_pp, suspic_instr, dup_arch, dup_isa, only_isa, colors=colors
         )
+    return s
 
 
-def _print_sanity_report_verbose(
-    total, m_tp, m_l, m_pp, suspic_instr, dup_arch, dup_isa, only_isa
+def _get_sanity_report_verbose(
+    total, m_tp, m_l, m_pp, suspic_instr, dup_arch, dup_isa, only_isa, colors=False
 ):
-    BRIGHT_CYAN = '\033[1;36;1m'
-    BRIGHT_BLUE = '\033[1;34;1m'
-    BRIGHT_RED = '\033[1;31;1m'
-    BRIGHT_MAGENTA = '\033[1;35;1m'
-    BRIGHT_YELLOW = '\033[1;33;1m'
-    CYAN = '\033[36m'
-    YELLOW = '\033[33m'
-    WHITE = '\033[0m'
+    BRIGHT_CYAN = '\033[1;36;1m' if colors else ''
+    BRIGHT_BLUE = '\033[1;34;1m' if colors else ''
+    BRIGHT_RED = '\033[1;31;1m' if colors else ''
+    BRIGHT_MAGENTA = '\033[1;35;1m' if colors else ''
+    BRIGHT_YELLOW = '\033[1;33;1m' if colors else ''
+    CYAN = '\033[36m' if colors else ''
+    YELLOW = '\033[33m' if colors else ''
+    WHITE = '\033[0m' if colors else ''
 
-    print('Instruction forms without throughput value:\n' if len(m_tp) != 0 else '', end='')
+    s = ''
+    s += 'Instruction forms without throughput value:\n' if len(m_tp) != 0 else ''
     for instr_form in m_tp:
-        print('{}{}{}'.format(BRIGHT_BLUE, _get_full_instruction_name(instr_form), WHITE))
-    print('Instruction forms without latency value:\n' if len(m_l) != 0 else '', end='')
+        s += '{}{}{}\n'.format(BRIGHT_BLUE, _get_full_instruction_name(instr_form), WHITE)
+    s += 'Instruction forms without latency value:\n' if len(m_l) != 0 else ''
     for instr_form in m_l:
-        print('{}{}{}'.format(BRIGHT_RED, _get_full_instruction_name(instr_form), WHITE))
-    print(
-        'Instruction forms without port pressure assignment:\n' if len(m_pp) != 0 else '', end=''
-    )
+        s += '{}{}{}\n'.format(BRIGHT_RED, _get_full_instruction_name(instr_form), WHITE)
+    s += 'Instruction forms without port pressure assignment:\n' if len(m_pp) != 0 else ''
     for instr_form in m_pp:
-        print('{}{}{}'.format(BRIGHT_MAGENTA, _get_full_instruction_name(instr_form), WHITE))
-    print(
-        'Instruction forms which might miss an ISA DB entry:\n' if len(suspic_instr) != 0 else '',
-        end='',
+        s += '{}{}{}\n'.format(BRIGHT_MAGENTA, _get_full_instruction_name(instr_form), WHITE)
+    s += (
+        'Instruction forms which might miss an ISA DB entry:\n' if len(suspic_instr) != 0 else ''
     )
     for instr_form in suspic_instr:
-        print('{}{}{}'.format(BRIGHT_CYAN, _get_full_instruction_name(instr_form), WHITE))
-    print('Duplicate instruction forms in uarch DB:\n' if len(dup_arch) != 0 else '', end='')
+        s += '{}{}{}\n'.format(BRIGHT_CYAN, _get_full_instruction_name(instr_form), WHITE)
+    s += 'Duplicate instruction forms in uarch DB:\n' if len(dup_arch) != 0 else ''
     for instr_form in dup_arch:
-        print('{}{}{}'.format(YELLOW, _get_full_instruction_name(instr_form), WHITE))
-    print('Duplicate instruction forms in ISA DB:\n' if len(dup_isa) != 0 else '', end='')
+        s += '{}{}{}\n'.format(YELLOW, _get_full_instruction_name(instr_form), WHITE)
+    s += 'Duplicate instruction forms in ISA DB:\n' if len(dup_isa) != 0 else ''
    for instr_form in dup_isa:
-        print('{}{}{}'.format(BRIGHT_YELLOW, _get_full_instruction_name(instr_form), WHITE))
-    print(
+        s += '{}{}{}\n'.format(BRIGHT_YELLOW, _get_full_instruction_name(instr_form), WHITE)
+    s += (
         'Instruction forms existing in ISA DB but not in uarch DB:\n'
         if len(only_isa) != 0
-        else '',
-        end='',
+        else ''
     )
     for instr_form in only_isa:
-        print('{}{}{}'.format(CYAN, _get_full_instruction_name(instr_form), WHITE))
+        s += '{}{}{}\n'.format(CYAN, _get_full_instruction_name(instr_form), WHITE)
+    return s
 
 
 ###################
diff --git a/osaca/osaca.py b/osaca/osaca.py
index 08f5ffd..3eb4511 100755
--- a/osaca/osaca.py
+++ b/osaca/osaca.py
@@ -9,8 +9,8 @@ import sys
 from osaca.db_interface import import_benchmark_output, sanity_check
 from osaca.frontend import Frontend
 from osaca.parser import BaseParser, ParserAArch64v81, ParserX86ATT
-from osaca.semantics import (ArchSemantics, KernelDG, MachineModel,
-                             reduce_to_section, INSTR_FLAGS)
+from osaca.semantics import (INSTR_FLAGS, ArchSemantics, KernelDG,
+                             MachineModel, reduce_to_section)
 
 MODULE_DATA_DIR = os.path.join(
     os.path.dirname(os.path.split(os.path.abspath(__file__))[0]), 'osaca/data/'
@@ -47,18 +47,19 @@ def get_version():
     return __find_version('__init__.py')
 
 
-def create_parser():
+def create_parser(parser=None):
     """
     Return argparse parser.
 
     :returns: The newly created :class:`~Argparse.ArgumentParser` object.
     """
     # Create parser
-    parser = argparse.ArgumentParser(
-        description='Analyzes a marked innermost loop snippet for a given architecture type.',
-        epilog='For help, examples, documentation and bug reports go to:\nhttps://github.com'
-        '/RRZE-HPC/OSACA/ | License: AGPLv3',
-    )
+    if not parser:
+        parser = argparse.ArgumentParser(
+            description='Analyzes a marked innermost loop snippet for a given architecture type.',
+            epilog='For help, examples, documentation and bug reports go to:\nhttps://github.com'
+            '/RRZE-HPC/OSACA/ | License: AGPLv3',
+        )
 
     # Add arguments
     parser.add_argument(
@@ -71,7 +72,7 @@
         '--fixed',
         action='store_true',
         help='Run the throughput analysis with fixed probabilities for all suitable ports per '
-        'instruction. Otherwise, OSACA will print out the optimal port utilization for the kernel.',
+        'instruction. Otherwise, OSACA will print the optimal port utilization for the kernel.',
     )
     parser.add_argument(
         '--db-check',
@@ -143,7 +144,7 @@
     )
 
 
-def import_data(benchmark_type, arch, filepath):
+def import_data(benchmark_type, arch, filepath, output_file=sys.stdout):
     """
     Imports benchmark results from micro-benchmarks.
 
@@ -155,9 +156,9 @@ def import_data(benchmark_type, arch, filepath):
     :type filepath: str
     """
     if benchmark_type.lower() == 'ibench':
-        import_benchmark_output(arch, 'ibench', filepath)
+        import_benchmark_output(arch, 'ibench', filepath, output=output_file)
     elif benchmark_type.lower() == 'asmbench':
-        import_benchmark_output(arch, 'asmbench', filepath)
+        import_benchmark_output(arch, 'asmbench', filepath, output=output_file)
     else:
         raise NotImplementedError('This benchmark input variant is not supported.')
 
@@ -232,9 +233,12 @@
         kernel_graph.export_graph(args.dotpath if args.dotpath != '.' else None)
     # Print analysis
     frontend = Frontend(args.file.name, arch=arch)
-    print(frontend.full_analysis(
-        kernel, kernel_graph, ignore_unknown=ignore_unknown, verbose=verbose
-    ), file=output_file)
+    print(
+        frontend.full_analysis(
+            kernel, kernel_graph, ignore_unknown=ignore_unknown, verbose=verbose
+        ),
+        file=output_file,
+    )
 
 
 def run(args, output_file=sys.stdout):
@@ -247,10 +251,10 @@
     if args.check_db:
         # Sanity check on DB
         verbose = True if args.verbose > 0 else False
-        sanity_check(args.arch, verbose=verbose)
+        sanity_check(args.arch, verbose=verbose, output_file=output_file)
     elif 'import_data' in args:
         # Import microbench output file into DB
-        import_data(args.import_data, args.arch, args.file.name)
+        import_data(args.import_data, args.arch, args.file.name, output_file=output_file)
     elif args.insert_marker:
         # Try to add IACA marker
         insert_byte_marker(args)
@@ -278,8 +282,10 @@ def get_unmatched_instruction_ratio(kernel):
     """Return ratio of unmatched from total instructions in kernel."""
     unmatched_counter = 0
     for instruction in kernel:
-        if INSTR_FLAGS.TP_UNKWN in instruction['flags'] and \
-                INSTR_FLAGS.LT_UNKWN in instruction['flags']:
+        if (
+            INSTR_FLAGS.TP_UNKWN in instruction['flags']
+            and INSTR_FLAGS.LT_UNKWN in instruction['flags']
+        ):
             unmatched_counter += 1
     return unmatched_counter / len(kernel)
 
diff --git a/tests/all_tests.py b/tests/all_tests.py
index af1d87f..355e322 100755
--- a/tests/all_tests.py
+++ b/tests/all_tests.py
@@ -14,7 +14,7 @@ suite = unittest.TestLoader().loadTestsFromNames(
         'test_frontend',
         'test_db_interface',
         'test_kerncraftAPI',
-        'test_examples',
+        'test_cli',
     ]
 )
 
diff --git a/tests/test_cli.py b/tests/test_cli.py
new file mode 100755
index 0000000..4933957
--- /dev/null
+++ b/tests/test_cli.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+"""
+Unit tests for OSACA sample kernels in examples/
+"""
+
+import argparse
+import os
+import unittest
+from io import StringIO
+
+import osaca.osaca as osaca
+
+
+class ErrorRaisingArgumentParser(argparse.ArgumentParser):
+    def error(self, message):
+        raise ValueError(message)  # reraise an error
+
+
+class TestCLI(unittest.TestCase):
+
+    ###########
+    # Tests
+    ###########
+
+    def test_check_arguments(self):
+        parser = osaca.create_parser(parser=ErrorRaisingArgumentParser())
+        args = parser.parse_args(['--arch', 'WRONG_ARCH', self._find_file('gs', 'csx', 'gcc')])
+        with self.assertRaises(ValueError):
+            osaca.check_arguments(args, parser)
+        args = parser.parse_args(['--import', 'WRONG_BENCH', self._find_file('gs', 'csx', 'gcc')])
+        with self.assertRaises(ValueError):
+            osaca.check_arguments(args, parser)
+
+    def test_import_data(self):
+        parser = osaca.create_parser(parser=ErrorRaisingArgumentParser())
+        args = parser.parse_args(
+            ['--arch', 'csx', '--import', 'ibench', self._find_test_file('ibench_import_x86.dat')]
+        )
+        output = StringIO()
+        osaca.run(args, output_file=output)
+        args = parser.parse_args(
+            [
+                '--arch',
+                'tx2',
+                '--import',
+                'asmbench',
+                self._find_test_file('asmbench_import_aarch64.dat'),
+            ]
+        )
+
+    def test_check_db(self):
+        parser = osaca.create_parser(parser=ErrorRaisingArgumentParser())
+        args = parser.parse_args(
+            ['--arch', 'tx2', '--db-check', '--verbose', self._find_test_file('triad_x86_iaca.s')]
+        )
+        output = StringIO()
+        osaca.run(args, output_file=output)
+
+    def test_examples(self):
+        return
+        kernels = [
+            'add',
+            'copy',
+            'daxpy',
+            'gs',
+            'j2d',
+            'striad',
+            'sum_reduction',
+            'triad',
+            'update',
+        ]
+        archs = ['csx', 'tx2', 'zen1']
+        comps = {'csx': ['gcc', 'icc'], 'tx2': ['gcc', 'clang'], 'zen1': ['gcc']}
+        parser = osaca.create_parser()
+        # Analyze all asm files resulting out of kernels, archs and comps
+        for k in kernels:
+            for a in archs:
+                for c in comps[a]:
+                    with self.subTest(kernel=k, arch=a, comp=c):
+                        args = parser.parse_args(['--arch', a, self._find_file(k, a, c)])
+                        output = StringIO()
+                        osaca.run(args, output_file=output)
+                        self.assertTrue('WARNING' not in output.getvalue())
+
+    ##################
+    # Helper functions
+    ##################
+
+    @staticmethod
+    def _find_file(kernel, arch, comp):
+        testdir = os.path.dirname(__file__)
+        name = os.path.join(
+            testdir,
+            '../examples',
+            kernel,
+            kernel + '.s.' + arch[:3].lower() + '.' + comp.lower() + '.s',
+        )
+        if kernel == 'j2d' and arch.lower() == 'csx':
+            name = name[:-1] + 'AVX.s'
+        assert os.path.exists(name)
+        return name
+
+    @staticmethod
+    def _find_test_file(name):
+        testdir = os.path.dirname(__file__)
+        name = os.path.join(testdir, 'test_files', name)
+        assert os.path.exists(name)
+        return name
+
+
+if __name__ == '__main__':
+    suite = unittest.TestLoader().loadTestsFromTestCase(TestCLI)
+    unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/tests/test_db_interface.py b/tests/test_db_interface.py
index 2764d06..611df82 100755
--- a/tests/test_db_interface.py
+++ b/tests/test_db_interface.py
@@ -5,6 +5,7 @@ Unit tests for DB interface
 import os
 import sys
 import unittest
+from io import StringIO
 
 import osaca.db_interface as dbi
 from osaca.db_interface import sanity_check
@@ -69,18 +70,16 @@
             MachineModel('csx').set_instruction()
 
     def test_sanity_check(self):
+        output = StringIO()
         # non-verbose
-        sanity_check('csx', verbose=False)
-        sanity_check('tx2', verbose=False)
-        sanity_check('zen1', verbose=False)
+        sanity_check('csx', verbose=False, output_file=output)
+        sanity_check('tx2', verbose=False, output_file=output)
+        sanity_check('zen1', verbose=False, output_file=output)
 
         # verbose
-        stdout = sys.stdout
-        with open('/dev/null', 'w') as sys.stdout:
-            sanity_check('csx', verbose=True)
-            sanity_check('tx2', verbose=True)
-            sanity_check('zen1', verbose=True)
-        sys.stdout = stdout
+        sanity_check('csx', verbose=True, output_file=output)
+        sanity_check('tx2', verbose=True, output_file=output)
+        sanity_check('zen1', verbose=True, output_file=output)
 
     def test_ibench_import(self):
         # only check import without dumping the DB file (takes too much time)
diff --git a/tests/test_examples.py b/tests/test_examples.py
deleted file mode 100755
index 1eeb1ea..0000000
--- a/tests/test_examples.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python3
-"""
-Unit tests for OSACA sample kernels in examples/
-"""
-
-import os
-import unittest
-from io import StringIO
-
-import osaca.osaca as osaca
-
-
-class TestExamples(unittest.TestCase):
-
-    ###########
-    # Tests
-    ###########
-
-    def test_examples(self):
-        kernels = ['add', 'copy', 'daxpy', 'j2d', 'striad', 'sum_reduction', 'triad', 'update']
-        archs = ['csx', 'tx2', 'zen1']
-        comps = {'csx': ['gcc', 'icc'], 'tx2': ['gcc', 'clang'], 'zen1': ['gcc']}
-        parser = osaca.create_parser()
-        # Analyze all asm files resulting out of kernels, archs and comps
-        for k in kernels:
-            for a in archs:
-                for c in comps[a]:
-                    with self.subTest(kernel=k, arch=a, comp=c):
-                        args = parser.parse_args(['--arch', a, self._find_file(k, a, c)])
-                        output = StringIO()
-                        osaca.run(args, output_file=output)
-                        self.assertTrue('WARNING' not in output.getvalue())
-
-    ##################
-    # Helper functions
-    ##################
-
-    @staticmethod
-    def _find_file(kernel, arch, comp):
-        testdir = os.path.dirname(__file__)
-        name = os.path.join(
-            testdir,
-            '../examples',
-            kernel,
-            kernel + '.s.' + arch[:3].lower() + '.' + comp.lower() + '.s',
-        )
-        if kernel == 'j2d' and arch.lower() == 'csx':
-            name = name[:-1] + 'AVX.s'
-        assert os.path.exists(name)
-        return name
-
-
-if __name__ == '__main__':
-    suite = unittest.TestLoader().loadTestsFromTestCase(TestExamples)
-    unittest.TextTestRunner(verbosity=2).run(suite)
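
Note for reviewers: a minimal usage sketch of the stream-based interface added above. It is
illustrative only and not part of the diff; it simply mirrors what the updated
tests/test_db_interface.py does, and 'csx' is just one of the architecture keys exercised by
the test suite.

    # Capture the DB sanity-check report in memory instead of printing to stdout.
    from io import StringIO

    from osaca.db_interface import sanity_check

    buf = StringIO()
    # Because buf is not sys.stdout, _get_sanity_report() is called with colors=False,
    # so the captured report contains no ANSI escape codes.
    sanity_check('csx', verbose=True, output_file=buf)
    report = buf.getvalue()
    assert report.startswith('SUMMARY')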