ATLAS Offline Software
Loading...
Searching...
No Matches
muonBucketDump.py
Go to the documentation of this file.
1# Copyright (C) 2002-2025 CERN for the benefit of the ATLAS collaboration
2
3
def main(args):
    """Configure and run the muon bucket-dump job.

    Builds the R4 muon geometry test configuration, schedules uncalibrated
    measurement preparation, space-point formation and pattern recognition,
    optionally applies an ML-based bucket filter, and finally schedules the
    bucket-dump algorithm whose output goes to ``args.outRootFile``.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line options (see the parser set up in ``__main__``).
    """
    from MuonGeoModelTestR4.testGeoModel import setupGeoR4TestCfg
    from MuonConfig.MuonConfigUtils import executeTest, setupHistSvcCfg
    from AthenaConfiguration.AllConfigFlags import initConfigFlags
    flags = initConfigFlags()
    flags.PerfMon.doFullMonMT = True

    from AthOnnxComps.OnnxRuntimeFlags import OnnxRuntimeType
    use_gpu_requested = getattr(args, "use_gpu", True)
    # Best-effort CUDA detection: prefer onnxruntime's own provider list,
    # fall back to torch, and assume CPU-only if neither is importable.
    gpu_available = False
    try:
        import onnxruntime as ort
        gpu_available = "CUDAExecutionProvider" in ort.get_available_providers()
    except Exception:
        try:
            import torch
            gpu_available = torch.cuda.is_available()
        except Exception:
            gpu_available = False
    if use_gpu_requested and gpu_available:
        flags.AthOnnx.ExecutionProvider = OnnxRuntimeType.CUDA
    else:
        flags.AthOnnx.ExecutionProvider = OnnxRuntimeType.CPU

    # BUG FIX: the configured flags must be handed to setupGeoR4TestCfg.
    # Previously `flags, cfg = setupGeoR4TestCfg(args)` rebound `flags` to a
    # fresh container, silently discarding the PerfMon and AthOnnx settings
    # made above. NOTE(review): assumes setupGeoR4TestCfg accepts the flags
    # container as its second argument — confirm against testGeoModel.py.
    flags, cfg = setupGeoR4TestCfg(args, flags)

    cfg.merge(setupHistSvcCfg(flags, outFile=args.outRootFile,
                              outStream="MuonBucketDump"))

    from MuonConfig.MuonDataPrepConfig import xAODUncalibMeasPrepCfg
    cfg.merge(xAODUncalibMeasPrepCfg(flags))

    from MuonSpacePointFormation.SpacePointFormationConfig import MuonSpacePointFormationCfg
    cfg.merge(MuonSpacePointFormationCfg(flags))

    from MuonPatternRecognitionAlgs.MuonPatternRecognitionConfig import MuonPatternRecognitionCfg
    cfg.merge(MuonPatternRecognitionCfg(flags))

    if getattr(args, "doMLBucketFilter", False):
        # Run graph-NN inference over the space-point buckets and write the
        # surviving buckets to the 'FilteredMlBuckets' container.
        from MuonInference.InferenceConfig import GraphBucketFilterToolCfg, GraphInferenceAlgCfg
        bias = getattr(args, "mlBucketBias", 1.0)
        bucketTool = cfg.popToolsAndMerge(
            GraphBucketFilterToolCfg(
                flags,
                BiasClass0=bias,
                WriteSpacePointKey="FilteredMlBuckets",
                ModelPath="/cvmfs/atlas-nightlies.cern.ch/repo/data/data-art/MuonRecRTT/edgecnn_multi_bucket_sparse_meta.onnx"
            )
        )
        cfg.merge(
            GraphInferenceAlgCfg(
                flags,
                InferenceTools=[bucketTool],
            )
        )

    from MuonBucketDump.MuonBucketDumpConfig import MuonBucketDumpCfg
    from MuonPatternRecognitionTest.PatternTestConfig import PatternVisualizationToolCfg
    cfg.merge(MuonBucketDumpCfg(flags,
                                DoCaloDump=getattr(args, "doCaloDump", False),
                                DoMLBucketScore=getattr(args, "doMLBucketScore", False),
                                DoMLBucketFilter=getattr(args, "doMLBucketFilter", False),
                                MLBucketBias=getattr(args, "mlBucketBias", 1.0),
                                VisualizationTool=cfg.popToolsAndMerge(PatternVisualizationToolCfg(flags, CanvasLimits=0))))
    if args.doTruthMuonVertexDump:
        from MuonBucketDump.MuonBucketDumpConfig import TruthMuonVertexDumpCfg
        cfg.merge(TruthMuonVertexDumpCfg(flags))

    executeTest(cfg)
73
if __name__=="__main__":
    from MuonGeoModelTestR4.testGeoModel import SetupArgParser, MuonPhaseIITestDefaults

    parser = SetupArgParser()
    # Job-specific defaults: process every event of the single-particle R3
    # HITS sample and write a dedicated bucket-dump output file.
    parser.set_defaults(nEvents=-1,
                        outRootFile="MuonBucketDump_R3SimHits.root",
                        inputFile=MuonPhaseIITestDefaults.HITS_PG_R3)

    # Optional dump/inference switches consumed by main().
    parser.add_argument("--doCaloDump", action="store_true", default=False,
                        help="Run calorimeter reconstruction and dump cell energy/position.")

    parser.add_argument("--doMLBucketScore", action="store_true", default=False,
                        help="Run ML inference and dump bucket filter scores.")

    parser.add_argument("--doMLBucketFilter", action="store_true", default=False,
                        help="Run ML bucket filtering and write filtered buckets to 'FilteredMlBuckets' container.")

    parser.add_argument("--mlBucketBias", type=float, default=1.0,
                        help="Bias value for class 0 in ML bucket classification (default: 1.0). Higher values make class 0 less likely.")

    parser.add_argument("--doTruthMuonVertexDump", action="store_true", help="Run the TruthMuonVertexDumperAlg to dump truth muon vertex information", default=False)

    # GPU selection: --use-gpu is the (redundant) affirmative form of the
    # default; --use-cpu flips the shared 'use_gpu' destination to False.
    parser.add_argument("--use-gpu", action="store_true", default=True,
                        help="Use GPU for ONNX inference (default: True)")
    parser.add_argument("--use-cpu", dest="use_gpu", action="store_false",
                        help="Use CPU for ONNX inference")

    main(parser.parse_args())
101
102
int main()
Definition hello.cxx:18