ATLAS Offline Software
runTrigART Namespace Reference

Functions

def minimal_pattern (package)
 
def get_parser ()
 
def get_patterns (args)
 
def analyse_result_python (test_results)
 
def analyse_result_shell (test_results)
 
def analyse_results (all_test_results)
 
def print_summary (all_test_results, failed_tests)
 
def print_failed_ref_comp (all_test_results, failed_tests)
 
def prep_dirs (topdir, scripts)
 
def main ()
 

Function Documentation

◆ analyse_result_python()

def runTrigART.analyse_result_python (test_results)
Analyses test results based on the final exit code, which determines
the status of ART tests using the TrigValSteering python framework

Definition at line 86 of file runTrigART.py.

86 def analyse_result_python(test_results):
87     '''Analyses test results based on the final exit code, which determines
88     the status of ART tests using the TrigValSteering python framework'''
89     result_string = ''
90     for step in test_results['result']:
91         result_string += "%s: %d, " % (step['name'], step['result'])
92     result_string += "exit: %d" % test_results['exit_code']
93     is_success = (test_results['exit_code'] == 0)
94     return result_string, is_success
95 
96 
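For illustration, a minimal sketch of the input this function expects, based on the structure read from the listing (a 'result' list of per-step dicts plus an overall 'exit_code'); the test record below is made up:

    # hypothetical per-test record shaped as the function expects
    test_results = {
        'result': [{'name': 'athena', 'result': 0},
                   {'name': 'CheckLog', 'result': 0}],
        'exit_code': 0,
    }
    result_string, is_success = analyse_result_python(test_results)
    # result_string == 'athena: 0, CheckLog: 0, exit: 0'
    # is_success == True, since the exit code is 0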

◆ analyse_result_shell()

def runTrigART.analyse_result_shell (test_results)
Analyses test results based on individual hard-coded step names, as
the final status cannot be unambiguously determined in the old shell-based
ART tests not using the TrigValSteering framework

Definition at line 97 of file runTrigART.py.

97 def analyse_result_shell(test_results):
98     '''Analyses test results based on individual hard-coded step names, as
99     the final status cannot be unambiguously determined in the old shell-based
100     ART tests not using the TrigValSteering framework'''
101     result_string = ''
102     steps_to_ignore = ['RootComp', 'MessageCount']
103     is_success = True
104     for step in test_results['result']:
105         result_string += "%s: %d, " % (step['name'], step['result'])
106         if step['name'] not in steps_to_ignore and step['result'] != 0:
107             is_success = False
108     result_string += "exit: %d" % test_results['exit_code']
109     return result_string, is_success
110 
111 
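A hedged example of the ignore logic, using a made-up record: a non-zero RootComp result does not fail the test, because RootComp is in steps_to_ignore:

    test_results = {
        'result': [{'name': 'athena', 'result': 0},
                   {'name': 'RootComp', 'result': 1}],
        'exit_code': 1,
    }
    result_string, is_success = analyse_result_shell(test_results)
    # is_success == True: only steps outside steps_to_ignore can fail the test,
    # and the exit code is not consulted for the shell-based tests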

◆ analyse_results()

def runTrigART.analyse_results (all_test_results)
Prints a summary table of all results and returns a list with the names of failed tests.
A test in which only the RootComp step failed is not added to this list, as we currently
do not enforce updating RootComp references on every change.

Definition at line 112 of file runTrigART.py.

112 def analyse_results(all_test_results):
113     '''Prints a summary table of all results and returns a list with the names of failed tests.
114     A test in which only the RootComp step failed is not added to this list, as we currently
115     do not enforce updating RootComp references on every change.'''
116     failed_tests = []
117     table = {}  # test name : results
118     for test_name in all_test_results.keys():
119         if test_name.endswith('.py'):
120             result_string, is_success = analyse_result_python(all_test_results[test_name])
121         else:
122             result_string, is_success = analyse_result_shell(all_test_results[test_name])
123         table[test_name] = result_string
124         if not is_success:
125             failed_tests.append(test_name)
126     max_len_col1 = len(max(table.keys(), key=len))
127     max_len_col2 = len(max(table.values(), key=len))
128     logging.info('-'*(max_len_col1+max_len_col2+7))
129     for k, v in table.items():
130         logging.info('| {col1:<{width1}} | {col2:<{width2}} |'.format(
131             col1=k, width1=max_len_col1,
132             col2=v, width2=max_len_col2))
133     logging.info('-'*(max_len_col1+max_len_col2+7))
134     return failed_tests
135 
136 
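A usage sketch with fabricated records (same shape as above), showing how the dispatch on the test-name extension works; logging must be configured at INFO level for the table to appear:

    all_test_results = {
        'test_trig_new_build.py': {'result': [{'name': 'athena', 'result': 0}],
                                   'exit_code': 0},
        'test_trig_old_build.sh': {'result': [{'name': 'athena', 'result': 1}],
                                   'exit_code': 1},
    }
    failed_tests = analyse_results(all_test_results)
    # the .py test is routed to analyse_result_python, the .sh test to
    # analyse_result_shell; failed_tests == ['test_trig_old_build.sh']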

◆ get_parser()

def runTrigART.get_parser ()

Definition at line 29 of file runTrigART.py.

29 def get_parser():
30     packages = ['TriggerTest', 'TrigAnalysisTest', 'TrigP1Test', 'ALL']
31     parser = argparse.ArgumentParser(usage='%(prog)s [options] [PackageName]')
32     parser.add_argument('package',
33                         metavar='PackageName',
34                         default='ALL',
35                         nargs='?',
36                         help='Name of the package from which to run ART tests. Options are: %(choices)s.'
37                              ' If no name is provided, %(default)s is used.',
38                         choices=packages)
39     parser.add_argument('-m', '--minimal',
40                         action='store_true',
41                         help='Run a small pre-defined set of tests for basic verification')
42     parser.add_argument('-n', '--testName',
43                         metavar='pattern',
44                         help='Run only tests with a given pattern in the name')
45     parser.add_argument('-r', '--rerun-failed',
46                         action='store_true',
47                         help='Run tests that failed previously')
48     parser.add_argument('-s', '--summary',
49                         action='store_true',
50                         help='Print summary of previously run tests')
51     parser.add_argument('-t', '--artType',
52                         metavar='type',
53                         default='build',
54                         choices=['build', 'grid'],
55                         help='Run tests with the given art-type: build (default) or grid')
56     parser.add_argument('-j', '--maxJobs',
57                         metavar='N',
58                         type=int,
59                         default=1,
60                         help='Run up to N tests in parallel (actual number may be limited by ART based on available CPUs and memory)')
61     parser.add_argument('-v', '--verbose',
62                         action='store_true',
63                         help='Increase output verbosity')
64     parser.add_argument('-d', '--dryRun',
65                         action='store_true',
66                         help='List tests which would be executed, but don\'t execute them')
67 
68     return parser
69 
70 
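A short parsing example; all options shown are defined in the listing above:

    parser = get_parser()
    args = parser.parse_args(['-m', '-j', '4', 'TriggerTest'])
    # args.package == 'TriggerTest', args.minimal is True, args.maxJobs == 4,
    # args.artType defaults to 'build'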

◆ get_patterns()

def runTrigART.get_patterns (args)

Definition at line 71 of file runTrigART.py.

71 def get_patterns(args):
72     patterns = ['^test_', '(.sh|.py)$', package_prefix(args.package)]
73     if args.testName:
74         patterns.append(args.testName)
75     if args.artType == 'grid':
76         patterns.append('_grid.')
77     else:
78         patterns.append('_build.')
79     if args.minimal:
80         patterns.append(minimal_pattern(args.package))
81 
82     logging.debug("Searching for scripts with the following patterns: %s", patterns)
83     return patterns
84 
85 
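A sketch of the resulting pattern list for a minimal TriggerTest run; the exact package prefix comes from python.TrigARTUtils.package_prefix and is not reproduced here:

    args = get_parser().parse_args(['-m', 'TriggerTest'])
    patterns = get_patterns(args)
    # patterns == ['^test_', '(.sh|.py)$', <TriggerTest prefix>, '_build.',
    #              'test_trig_data_v1Dev_build']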

◆ main()

def runTrigART.main ()

Definition at line 192 of file runTrigART.py.

192 def main():
193     args = get_parser().parse_args()
194     logging.basicConfig(stream=sys.stdout,
195                         format='%(levelname)-8s %(message)s',
196                         level=logging.DEBUG if args.verbose else logging.INFO)
197 
198     topdir = 'runTrigART'
199     statusfile = 'results/{:s}-status.json'.format(topdir)
200 
201     # Load previous results
202     if args.rerun_failed or args.summary:
203         status_data = json.load(open(os.path.join(topdir, statusfile)))
204         all_test_results = status_data[topdir]
205         failed_tests = analyse_results(all_test_results)
206         if args.summary:
207             print_summary(all_test_results, failed_tests)
208             return 0
209 
210     if args.rerun_failed:
211         scripts = find_scripts(get_patterns(args)+['|'.join(failed_tests)])
212     else:
213         scripts = find_scripts(get_patterns(args))
214 
215     logging.info("The following %d tests will be executed: ", len(scripts))
216     for filename in scripts:
217         logging.info("    %s", os.path.basename(filename))
218 
219     if len(scripts) > 5*args.maxJobs:
220         if args.maxJobs == 1:
221             logging.warning("You are running %d tests in sequence. This may take "
222                             "a long time, consider using -j N option.", len(scripts))
223         else:
224             logging.warning("You are running %d tests with %d parallel jobs. "
225                             "This may take a long time.", len(scripts), args.maxJobs)
226 
227     if args.dryRun:
228         return 0
229 
230     success = True
231     with remember_cwd():
232         prep_dirs(topdir, scripts)
233         os.chdir(topdir)
234         for script_path in scripts:
235             target = 'test/' + os.path.basename(script_path)
236             os.symlink(script_path, target)
237 
238         # Set up and run ART
239         commands = [
240             'export ATLAS_LOCAL_ROOT_BASE="${ATLAS_LOCAL_ROOT_BASE:-/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase}"',
241             'source "${ATLAS_LOCAL_ROOT_BASE}"/user/atlasLocalSetup.sh --quiet',
242             'lsetup -q art']
243         art_cmd = 'art.py run --run-all-tests --max-jobs={:d} {:s} . results'.format(args.maxJobs, '-v' if args.verbose else '-q')
244         commands.append(art_cmd)
245         cmd = ' && '.join(commands)
246         logging.info("Executing ART command: %s", art_cmd)
247         subprocess.call(cmd, shell=True)
248         logging.info("ART finished, analysing the results\n")
249 
250         # Read the result summary from JSON
251         if not os.path.isfile(statusfile):
252             logging.error("ART status.json file is missing - likely the ART runner failed!")
253             exit(1)
254         with open(statusfile, 'r') as f:
255             status_data = json.load(f)
256         all_test_results = status_data[topdir]
257         if len(all_test_results) != len(scripts):
258             logging.warning("Selected %d tests but ART executed only %d. Please check why some tests did not run!", len(scripts), len(all_test_results))
259         failed_tests = analyse_results(all_test_results)
260         print_summary(all_test_results, failed_tests)
261         print_failed_ref_comp(all_test_results, failed_tests)
262         if len(failed_tests) > 0:
263             success = False
264 
265     if not success:
266         exit(1)
267     else:
268         exit(0)
269 
270 
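Typical invocations (a hedged sketch, assuming an Athena build environment where the script is on the PATH): runTrigART.py -m TriggerTest runs the minimal TriggerTest set, runTrigART.py -j 4 runs all build tests with four parallel jobs, and runTrigART.py -r reruns the tests that failed in the previous session. All flags correspond to the parser defined in get_parser() above.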

◆ minimal_pattern()

def runTrigART.minimal_pattern (package)

Definition at line 16 of file runTrigART.py.

16 def minimal_pattern(package):
17     dict = {'TriggerTest': 'test_trig_data_v1Dev_build',
18             'TrigP1Test': 'test_trigP1_v1Dev_decodeBS_build',
19             'TrigAnalysisTest': 'test_trigAna_RDOtoRDOTrig_v1Dev_build'}
20     if package == 'ALL':
21         return '({})'.format('|'.join([v for v in dict.values() if v]))
22     elif package in dict and dict[package] is not None:
23         return dict[package]
24     else:
25         logging.error("Minimal set of tests for %s is not defined.", package)
26         exit(1)
27 
28 
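The return values follow directly from the dictionary in the listing:

    minimal_pattern('TriggerTest')
    # -> 'test_trig_data_v1Dev_build'
    minimal_pattern('ALL')
    # -> '(test_trig_data_v1Dev_build|test_trigP1_v1Dev_decodeBS_build|test_trigAna_RDOtoRDOTrig_v1Dev_build)'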

◆ prep_dirs()

def runTrigART.prep_dirs (topdir, scripts)
Creates the test result structure if missing; if present, clears the area only for the tests to be run

Definition at line 180 of file runTrigART.py.

180 def prep_dirs(topdir, scripts):
181     """Creates the test result structure if missing; if present, clears the area only for the tests to be run"""
182 
183     shutil.rmtree(topdir+'/test', ignore_errors=True)
184     os.makedirs(topdir+'/test', exist_ok=True)
185 
186     # clear results dir
187     for script in scripts:
188         toerase = topdir+'/results/runTrigART/'+os.path.splitext(os.path.basename(script))[0]
189         shutil.rmtree(toerase, ignore_errors=True)
190 
191 
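A sketch of the effect on disk, using a made-up script path:

    prep_dirs('runTrigART', ['/some/path/test_example_build.py'])
    # recreates runTrigART/test/ empty, and removes
    # runTrigART/results/runTrigART/test_example_build/ if it existed,
    # leaving results of tests not in the list untouched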

◆ print_failed_ref_comp()

def runTrigART.print_failed_ref_comp (all_test_results, failed_tests)

Definition at line 154 of file runTrigART.py.

154 def print_failed_ref_comp(all_test_results, failed_tests):
155     if len(failed_tests) == 0:
156         return
157 
158     step_name = 'CountRefComp'
159     for test_name in failed_tests:
160         athena_failed = False
161         refcomp_failed = False
162         test_results = all_test_results[test_name]
163         for step in test_results['result']:
164             if step['name'] in ['athena', 'athenaHLT', 'Reco_tf', 'CheckLog'] and step['result'] != 0:
165                 athena_failed = True
166             if step['name'] == step_name and step['result'] != 0:
167                 refcomp_failed = True
168 
169         # Print only if athena didn't fail, as a failed job will always give wrong counts
170         if refcomp_failed and not athena_failed:
171             log_path = 'results/runTrigART/{:s}/{:s}.log'.format(os.path.splitext(test_name)[0], step_name)
172             if not os.path.isfile(log_path):
173                 continue
174             logging.info("==================================================")
175             logging.info('Printing output of failed CountRefComp for %s', test_name)
176             subprocess.call('cat ' + log_path, shell=True)
177             logging.info("==================================================")
178 
179 
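A hedged example with a fabricated record: the CountRefComp log is printed only when the main job steps succeeded, since a failed athena job always yields wrong counts:

    all_test_results = {'test_example_build.py': {
        'result': [{'name': 'athena', 'result': 0},
                   {'name': 'CountRefComp', 'result': 1}],
        'exit_code': 1}}
    print_failed_ref_comp(all_test_results, ['test_example_build.py'])
    # prints results/runTrigART/test_example_build/CountRefComp.log
    # (relative to the current working directory) if it exists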

◆ print_summary()

def runTrigART.print_summary (all_test_results, failed_tests)

Definition at line 137 of file runTrigART.py.

137 def print_summary(all_test_results, failed_tests):
138     if len(failed_tests) > 0:
139         logging.info(
140             "%d tests succeeded out of %d executed",
141             len(all_test_results)-len(failed_tests),
142             len(all_test_results))
143         logging.error("==================================================")
144         logging.error("The following %d tests failed:", len(failed_tests))
145         for test_name in failed_tests:
146             logging.error("    %s", test_name)
147         logging.error("==================================================")
148     else:
149         logging.info("==================================================")
150         logging.info("All %d executed tests succeeded", len(all_test_results))
151         logging.info("==================================================")
152 
153 
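An output sketch with made-up records, assuming logging is configured as in main():

    print_summary({'test_a_build.py': {}, 'test_b_build.py': {}}, failed_tests=[])
    # INFO     ==================================================
    # INFO     All 2 executed tests succeeded
    # INFO     ==================================================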