from TrigValTools.TrigARTUtils import package_prefix, find_scripts, remember_cwd

    dict = {'TriggerTest':      'test_trig_data_v1Dev_build',
            'TrigP1Test':       'test_trigP1_v1Dev_decodeBS_build',
            'TrigAnalysisTest': 'test_trigAna_RDOtoRDOTrig_v1Dev_build'}

        return '({})'.format('|'.join([v for v in dict.values() if v]))
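        # Illustrative note: the return above (presumably the branch handling package='ALL';
        # its condition is on a line not shown here) joins every defined test name into one
        # alternation pattern:
        # '(test_trig_data_v1Dev_build|test_trigP1_v1Dev_decodeBS_build|test_trigAna_RDOtoRDOTrig_v1Dev_build)'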
 
    elif package in dict and dict[package] is not None:

        logging.error("Minimal set of tests for %s is not defined.", package)
 
    packages = ['TriggerTest', 'TrigAnalysisTest', 'TrigP1Test', 'ALL']
    parser = argparse.ArgumentParser(usage='%(prog)s [options] [PackageName]')
    parser.add_argument('package',
                        metavar='PackageName',

                        help='Name of the package from which to run ART tests. Options are: %(choices)s.'
                             ' If no name is provided, %(default)s is used.',

    parser.add_argument('-m', '--minimal',

                        help='Run a small pre-defined set of tests for basic verification')
    parser.add_argument('-n', '--testName',

                        help='Run only tests with a given pattern in the name')
    parser.add_argument('-r', '--rerun-failed',

                        help='Run tests that failed previously')
    parser.add_argument('-s', '--summary',

                        help='Print summary of previously run tests')
    parser.add_argument('-t', '--artType',

                        choices=['build', 'grid'],
                        help='Run tests with the given art-type: build (default) or grid')
    parser.add_argument('-j', '--maxJobs',

                        help='Run up to N tests in parallel (actual number may be limited by ART based on available CPUs and memory)')
    parser.add_argument('-v', '--verbose',

                        help='Increase output verbosity')
    parser.add_argument('-d', '--dryRun',

                        help='List tests which would be executed, but don\'t execute them')
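    # Illustrative usage of the options defined above (a sketch only; defaults, actions
    # and argument types are set on lines not shown in this excerpt, and the script
    # name runTrigART.py is assumed):
    #   runTrigART.py -m -j 4 TriggerTest      # minimal test set, up to 4 parallel jobs
    #   runTrigART.py -t grid -d TrigP1Test    # dry-run listing of grid-type tests
    #   runTrigART.py -s                       # summary of previously run tests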
 
        patterns.append(args.testName)
    if args.artType == 'grid':
        patterns.append('_grid.')

        patterns.append('_build.')
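        # Illustrative example: with '-n HLT' and the default (build) art-type, 'HLT' and
        # '_build.' end up in the pattern list used to select the test scripts.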
 
    logging.debug("Searching for scripts with the following patterns: %s", patterns)
 
    '''Analyses test results based on the final exit code, which determines
    the status of ART tests built on the TrigValSteering python framework'''

    for step in test_results['result']:
        result_string += "%s: %d, " % (step['name'], step['result'])
    result_string += "exit: %d" % test_results['exit_code']
    is_success = (test_results['exit_code'] == 0)
    return result_string, is_success
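    # Illustrative example of the structure assumed above (mirroring only the keys
    # accessed in this function; the real content comes from the ART status file, and
    # result_string is assumed to start empty on a line not shown here):
    #   test_results = {'exit_code': 0,
    #                   'result': [{'name': 'athena',   'result': 0},
    #                              {'name': 'CheckLog', 'result': 0}]}
    # would give result_string == 'athena: 0, CheckLog: 0, exit: 0' and is_success == True.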
 
    '''Analyses test results based on individual hard-coded step names, as
    the final status cannot be unambiguously determined in the old shell-based
    ART tests not using the TrigValSteering framework'''

    steps_to_ignore = ['RootComp', 'MessageCount']

    for step in test_results['result']:
        result_string += "%s: %d, " % (step['name'], step['result'])
        if step['name'] not in steps_to_ignore and step['result'] != 0:

    result_string += "exit: %d" % test_results['exit_code']
    return result_string, is_success
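    # Note (assuming is_success is initialised to True on a line not shown here): a test
    # whose only failing steps are 'RootComp' or 'MessageCount' is still reported as
    # successful by this function, since those steps are deliberately ignored above.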
 
    '''Prints a summary table of all results and returns two lists. One includes names of failed tests,
    the other names of tests in which only the RootComp step failed. If only RootComp fails, the test is
    not added to the first list, as we currently do not enforce updating RootComp references on every change.'''

    for test_name in all_test_results.keys():
        if test_name.endswith('.py'):

        table[test_name] = result_string

            failed_tests.append(test_name)
    max_len_col1 = len(max(table.keys(), key=len))
    max_len_col2 = len(max(table.values(), key=len))
    logging.info('-'*(max_len_col1+max_len_col2+7))
    for k, v in table.items():
        logging.info('| {col1:<{width1}} | {col2:<{width2}} |'.format(
            col1=k, width1=max_len_col1,
            col2=v, width2=max_len_col2))
    logging.info('-'*(max_len_col1+max_len_col2+7))
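    # Illustrative example of one printed row (column widths adapt to the longest
    # test name and result string in the table):
    #   | test_trig_data_v1Dev_build.py | athena: 0, CheckLog: 0, exit: 0 |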
 
    if len(failed_tests) > 0:

            "%d tests succeeded out of %d executed",
            len(all_test_results)-len(failed_tests),
            len(all_test_results))
        logging.error("==================================================")
        logging.error("The following %d tests failed:", len(failed_tests))
        for test_name in failed_tests:
            logging.error("    %s", test_name)
        logging.error("==================================================")

        logging.info("==================================================")
        logging.info("All %d executed tests succeeded", len(all_test_results))
        logging.info("==================================================")
 
    if len(failed_tests) == 0:

    step_name = 'CountRefComp'
    for test_name in failed_tests:
        athena_failed = False
        refcomp_failed = False
        test_results = all_test_results[test_name]
        for step in test_results['result']:
            if step['name'] in ['athena', 'athenaHLT', 'Reco_tf', 'CheckLog'] and step['result'] != 0:

            if step['name'] == step_name and step['result'] != 0:
                refcomp_failed = True

        if refcomp_failed and not athena_failed:
            log_path = 'results/runTrigART/{:s}/{:s}.log'.format(os.path.splitext(test_name)[0], step_name)
            if not os.path.isfile(log_path):

            logging.info("==================================================")
            logging.info('Printing output of failed CountRefComp for %s', test_name)
            subprocess.call('cat ' + log_path, shell=True)
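            # Illustrative example: for a failed 'test_trig_data_v1Dev_build.py' the file
            # dumped above would be 'results/runTrigART/test_trig_data_v1Dev_build/CountRefComp.log'.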
 
            logging.info("==================================================")
 
    """Creates the test results directory structure if missing; if it exists, clears only the areas for the tests to be run"""

    shutil.rmtree(topdir+'/test', ignore_errors=True)
    os.makedirs(topdir+'/test', exist_ok=True)

    for script in scripts:
        toerase = topdir+'/results/runTrigART/'+os.path.splitext(os.path.basename(script))[0]
        shutil.rmtree(toerase, ignore_errors=True)
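    # Resulting layout (an illustrative sketch; paths are relative to the working directory):
    #   runTrigART/test/                           - area prepared for links to the selected test scripts
    #   runTrigART/results/runTrigART/<test name>/ - per-test ART results, cleared here before a rerun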
 
    logging.basicConfig(stream=sys.stdout,
                        format='%(levelname)-8s %(message)s',
                        level=logging.DEBUG if args.verbose else logging.INFO)

    topdir = 'runTrigART'
    statusfile = 'results/{:s}-status.json'.format(topdir)

    if args.rerun_failed or args.summary:
        status_data = json.load(open(os.path.join(topdir, statusfile)))
        all_test_results = status_data[topdir]

        if args.rerun_failed:

    logging.info("The following %d tests will be executed: ", len(scripts))
    for filename in scripts:
        logging.info("    %s", os.path.basename(filename))
 
    if len(scripts) > 5*args.maxJobs:
        if args.maxJobs == 1:
            logging.warning("You are running %d tests in sequence. This may take "
                            "a long time, consider using -j N option.", len(scripts))

            logging.warning("You are running %d tests with %d parallel jobs. "
                            "This may take a long time.", len(scripts), args.maxJobs)
 
        for script_path in scripts:
            target = 'test/' + os.path.basename(script_path)
            os.symlink(script_path, target)

            'export ATLAS_LOCAL_ROOT_BASE="${ATLAS_LOCAL_ROOT_BASE:-/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase}"',
            'source "${ATLAS_LOCAL_ROOT_BASE}"/user/atlasLocalSetup.sh --quiet',

        art_cmd = 'art.py run --run-all-tests --max-jobs={:d} {:s} . results'.format(args.maxJobs, '-v' if args.verbose else '-q')
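        # Illustrative example of the command formatted above, for args.maxJobs=4 and
        # no --verbose: 'art.py run --run-all-tests --max-jobs=4 -q . results'.
        # It is appended to the commands list below and everything is joined with
        # ' && ' so that the setup and the ART run execute in a single shell.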
 
        commands.append(art_cmd)
        cmd = ' && '.join(commands)
        logging.info("Executing ART command: %s", art_cmd)
        subprocess.call(cmd, shell=True)
        logging.info("ART finished, analysing the results\n")
 
        if not os.path.isfile(statusfile):
            logging.error("ART status.json file is missing - likely the ART runner failed!")

        with open(statusfile, 'r') as f:
            status_data = json.load(f)
            all_test_results = status_data[topdir]
            if len(all_test_results) != len(scripts):
                logging.warning("Selected %d tests but ART executed only %d. Please check why some tests did not run!",
                                len(scripts), len(all_test_results))

            if len(failed_tests) > 0:
if __name__ == '__main__':