ATLAS Offline Software
RunWorkflowTests_Run3.py
#!/usr/bin/env python
# Copyright (C) 2002-2023 CERN for the benefit of the ATLAS collaboration

from sys import exit

from WorkflowTestRunner.ScriptUtils import setup_logger, setup_parser, get_test_setup, get_standard_performance_checks, \
    run_tests, run_checks, run_summary
from WorkflowTestRunner.StandardTests import DerivationTest, GenerationTest, OverlayTest, PileUpTest, QTest, SimulationTest
from WorkflowTestRunner.Test import WorkflowRun, WorkflowType


def main():
    name = "Run3Tests"
    run = WorkflowRun.Run3

    # Setup the environment
    log = setup_logger(name)
    parser = setup_parser()
    options = parser.parse_args()
    setup = get_test_setup(name, options, log)

    # Define which tests to run
    tests_to_run = []
    if options.generation:
        dsid = "421356" if not options.dsid else options.dsid
        tests_to_run.append(GenerationTest(f"gen{dsid}", run, WorkflowType.Generation, ["generate"], setup, options.extra_args))
    elif options.simulation:
        if not options.workflow or options.workflow is WorkflowType.FullSim:
            ami_tag = "s4006" if not options.ami_tag else options.ami_tag
            tests_to_run.append(SimulationTest(ami_tag, run, WorkflowType.FullSim, ["EVNTtoHITS"], setup, options.extra_args + " --conditionsTag 'default:OFLCOND-MC21-SDR-RUN3-07' --geometryVersion 'default:ATLAS-R3S-2021-03-02-00'"))
        if options.workflow is WorkflowType.AF3:
            ami_tag = "a913" if not options.ami_tag else options.ami_tag
            tests_to_run.append(SimulationTest(ami_tag, run, WorkflowType.AF3, ["EVNTtoHITS"], setup, options.extra_args))
        if options.workflow is WorkflowType.HitsMerge:
            ami_tag = "s4007" if not options.ami_tag else options.ami_tag
            tests_to_run.append(SimulationTest(ami_tag, run, WorkflowType.HitsMerge, ["HITSMerge"], setup, options.extra_args))
        if options.workflow is WorkflowType.HitsFilter:
            ami_tag = "s4008" if not options.ami_tag else options.ami_tag
            tests_to_run.append(SimulationTest(ami_tag, run, WorkflowType.HitsFilter, ["FilterHitTf"], setup, options.extra_args))
    elif options.overlay:
        if not options.workflow or options.workflow is WorkflowType.MCOverlay:
            tests_to_run.append(OverlayTest("d1759", run, WorkflowType.MCOverlay, ["Overlay"], setup, options.extra_args + " --runNumber 601229 --conditionsTag 'default:OFLCOND-MC23-SDR-RUN3-05'"))
    elif options.pileup:
        if setup.parallel_execution:
            log.error("Parallel execution not supported for pile-up workflow")
            exit(1)
        if not options.workflow or options.workflow is WorkflowType.PileUpPresampling:
            ami_tag = "d1919" if not options.ami_tag else options.ami_tag
            tests_to_run.append(PileUpTest(ami_tag, run, WorkflowType.PileUpPresampling, ["HITtoRDO"], setup, options.extra_args))
        if not options.workflow or options.workflow is WorkflowType.MCPileUpReco:
            tests_to_run.append(QTest("q455", run, WorkflowType.MCPileUpReco, ["Overlay", "RDOtoRDOTrigger", "RAWtoALL"], setup, options.extra_args))
    elif options.derivation:
        test_id = "MC_PHYS" if not options.ami_tag else options.ami_tag
        test_id = f"{test_id}_{run.value}"
        tests_to_run.append(DerivationTest(test_id, run, WorkflowType.Derivation, ["Derivation"], setup, options.extra_args))
    else:
        if not options.workflow or options.workflow is WorkflowType.MCReco:
            ami_tag = "q454" if not options.ami_tag else options.ami_tag
            tests_to_run.append(QTest(ami_tag, run, WorkflowType.MCReco, ["HITtoRDO", "RDOtoRDOTrigger", "RAWtoALL"], setup, options.extra_args))
        if not options.workflow or options.workflow is WorkflowType.DataReco:
            ami_tag = "q449" if not options.ami_tag else options.ami_tag
            tests_to_run.append(QTest(ami_tag, run, WorkflowType.DataReco, ["RAWtoALL", "DQHistogramMerge"], setup, options.extra_args))

    # Define which performance checks to run
    performance_checks = get_standard_performance_checks(setup)

    # Define and run jobs
    run_tests(setup, tests_to_run)

    # Run post-processing checks
    all_passed = run_checks(setup, tests_to_run, performance_checks)

    # Final report
    run_summary(setup, tests_to_run, all_passed)


if __name__ == "__main__":
    main()
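
The script is normally driven through the command-line options parsed by setup_parser(). For readers who want to see how the imported helpers fit together, below is a minimal sketch (not taken from the repository) that schedules only the Run 3 MC reconstruction q-test. It assumes the argparse defaults returned by setup_parser() are sufficient for get_test_setup(); the q454 tag, the step list and the QTest constructor arguments are copied from the MCReco branch of main() above.

# Minimal sketch: drive a single Run 3 MC reconstruction q-test with the same
# helpers this script imports. Assumption: the parser defaults are usable
# without passing any command-line arguments.
from WorkflowTestRunner.ScriptUtils import setup_logger, setup_parser, get_test_setup, \
    get_standard_performance_checks, run_tests, run_checks, run_summary
from WorkflowTestRunner.StandardTests import QTest
from WorkflowTestRunner.Test import WorkflowRun, WorkflowType

log = setup_logger("Run3MCRecoOnly")
options = setup_parser().parse_args([])  # empty argv: rely on defaults (assumption)
setup = get_test_setup("Run3MCRecoOnly", options, log)

# Same arguments as the MCReco branch above: AMI tag, run period, workflow type,
# transform steps, test setup and extra transform arguments.
test = QTest("q454", WorkflowRun.Run3, WorkflowType.MCReco,
             ["HITtoRDO", "RDOtoRDOTrigger", "RAWtoALL"], setup, options.extra_args)

run_tests(setup, [test])
all_passed = run_checks(setup, [test], get_standard_performance_checks(setup))
run_summary(setup, [test], all_passed)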