import json
import logging
import os
import subprocess
import sys

from TrigValTools.TrigARTUtils import package_prefix, find_scripts, remember_cwd
def analyse_results(all_test_results):  # note: the def line is inferred, not part of the original excerpt
    '''Prints a summary table of all results and returns two lists. One includes names of failed tests,
    the other names of tests in which only the RootComp step failed. If only RootComp fails, the test is
    not added to the first list, as we currently do not enforce updating RootComp references on every change.'''
    failed_tests = []
    table = {}
    for test_name in all_test_results.keys():
        if test_name.endswith('.py'):
            ...  # elided in the excerpt
        table[test_name] = result_string
        if ...:  # elided condition (the test failed)
            failed_tests.append(test_name)
    max_len_col1 = len(max(table.keys(), key=len))
    max_len_col2 = len(max(table.values(), key=len))
    logging.info('-'*(max_len_col1+max_len_col2+7))
    for k, v in table.items():
        logging.info('| {col1:<{width1}} | {col2:<{width2}} |'.format(
            col1=k, width1=max_len_col1,
            col2=v, width2=max_len_col2))
    logging.info('-'*(max_len_col1+max_len_col2+7))
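    # For illustration (hypothetical test names and result strings, since
    # result_string is built in elided lines), the printed summary looks like:
    #   --------------------------
    #   | test_trig_a.py     | 0 |
    #   | test_trig_other.py | 1 |
    #   --------------------------
    # Both column widths adapt to the longest entry via the format fields above.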
    # ... (remainder of the function elided)


def print_refcomp_failures(failed_tests, all_test_results):  # note: hypothetical name; the def line is not part of the original excerpt
    if len(failed_tests) == 0:
        return  # body elided in the excerpt; nothing to print without failures
    step_name = 'CountRefComp'
    for test_name in failed_tests:
        athena_failed = False
        refcomp_failed = False
        test_results = all_test_results[test_name]
        for step in test_results['result']:
            if step['name'] in ['athena', 'athenaHLT', 'Reco_tf', 'CheckLog'] and step['result'] != 0:
                athena_failed = True
            if step['name'] == step_name and step['result'] != 0:
                refcomp_failed = True
        if refcomp_failed and not athena_failed:
            log_path = 'results/runTrigART/{:s}/{:s}.log'.format(os.path.splitext(test_name)[0], step_name)
            if not os.path.isfile(log_path):
                continue  # body elided in the excerpt; skipping a missing log seems intended
            logging.info("==================================================")
            logging.info('Printing output of failed CountRefComp for %s', test_name)
            subprocess.call('cat ' + log_path, shell=True)
            logging.info("==================================================")

def main():  # note: the enclosing def and the argument parsing (into 'args') are elided in the excerpt
    # ...
    logging.basicConfig(stream=sys.stdout,
                        format='%(levelname)-8s %(message)s',
                        level=logging.DEBUG if args.verbose else logging.INFO)
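    # With this format a log record renders e.g. as
    #   INFO     The following 3 tests will be executed:
    # since '%(levelname)-8s' left-justifies the level name in an 8-character field.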
    topdir = 'runTrigART'
    statusfile = 'results/{:s}-status.json'.format(topdir)
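    # i.e. statusfile == 'results/runTrigART-status.json'. ART writes it inside
    # the run directory, which is why it is joined with topdir when read below.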
    # ...
    if args.rerun_failed or args.summary:
        with open(os.path.join(topdir, statusfile)) as f:
            status_data = json.load(f)
        all_test_results = status_data[topdir]
        # ...
    if args.rerun_failed:
        scripts = find_scripts(get_patterns(args) + ['|'.join(failed_tests)])
    # ...
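    # Presumably find_scripts treats each entry as a regular expression, so the
    # joined failed-test names, e.g. 'test_a.py|test_b.py' (hypothetical names),
    # act as a single alternation matching any previously failed test.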
    logging.info("The following %d tests will be executed:", len(scripts))
    for filename in scripts:
        logging.info(" %s", os.path.basename(filename))
    if len(scripts) > 5*args.maxJobs:
        if args.maxJobs == 1:
            logging.warning("You are running %d tests in sequence. This may take "
                            "a long time, consider using the -j N option.", len(scripts))
        else:
            logging.warning("You are running %d tests with %d parallel jobs. "
                            "This may take a long time.", len(scripts), args.maxJobs)
    # ...
    for script_path in scripts:
        target = 'test/' + os.path.basename(script_path)
        os.symlink(script_path, target)
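    # Each selected script is linked into a local 'test' directory, e.g.
    # (hypothetical name) test/test_trig_example.py -> <original location>,
    # so that the 'art.py run' invocation below can discover it from the
    # current directory.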
    # ...
    commands = [  # note: the opening of this list is elided in the excerpt
        'export ATLAS_LOCAL_ROOT_BASE="${ATLAS_LOCAL_ROOT_BASE:-/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase}"',
        'source "${ATLAS_LOCAL_ROOT_BASE}"/user/atlasLocalSetup.sh --quiet',
        # ...
    ]
    art_cmd = 'art.py run --run-all-tests --max-jobs={:d} {:s} . results'.format(
        args.maxJobs, '-v' if args.verbose else '-q')
    commands.append(art_cmd)
    cmd = ' && '.join(commands)
    logging.info("Executing ART command: %s", art_cmd)
    subprocess.call(cmd, shell=True)
    logging.info("ART finished, analysing the results\n")
    # ...
    if not os.path.isfile(statusfile):
        logging.error("ART status.json file is missing - likely the ART runner failed!")
        # ... (early exit elided in the excerpt)
    with open(statusfile, 'r') as f:
        status_data = json.load(f)
        all_test_results = status_data[topdir]
        if len(all_test_results) != len(scripts):
            logging.warning("Selected %d tests but ART executed only %d. "
                            "Please check why some tests did not run!",
                            len(scripts), len(all_test_results))
    # ...
    if len(failed_tests) > 0:
        ...  # remainder of the excerpt elided