RatesAnalysisPostProcessing.py
#!/usr/bin/env python
#
# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration
#

'''
@file RatesAnalysisPostProcessing.py
@author T. Martin
@date 2020-02-04
@brief Script to consume merged rates histograms from the RatesAnalysis package and produce structured CSV and JSON output.
'''
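
# Example usage (a minimal sketch; the input file names, the tag and the details
# string below are illustrative placeholders, and the per-job histogram files are
# assumed to have been merged beforehand with ROOT's hadd utility):
#
#   hadd RatesHistograms.root ratesJob1.root ratesJob2.root
#   python RatesAnalysisPostProcessing.py --file RatesHistograms.root \
#       --outputTag MyTest --userDetails "short description of this run"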

import ROOT
from RatesAnalysis.Util import getTableName, getMetadata, populateTriggers, getGlobalGroup, toJson, toCSV, toROOT, populateScanTriggers, slice_dictionary
from AthenaCommon.Logging import logging


def main():
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--file', default='RatesHistograms.root',
                        help='Input ROOT file to generate output from; run hadd first if you have more than one')
    parser.add_argument('--outputTag', default='LOCAL',
                        help='Tag identifying this processing, used in the output folder name (any underscores will be removed)')
    parser.add_argument('--outputJSONFile', default='rates.json',
                        help='JSON file of rates for use with the RuleBook')
    parser.add_argument('--outputROOTFile', default='scanTriggers.root',
                        help='ROOT file of normalised scan-triggers')
    parser.add_argument('--userDetails',
                        help='User-supplied metadata string giving any extra details about this run')
    parser.add_argument('--jira',
                        help='Related JIRA ticket number')
    parser.add_argument('--amiTag',
                        help='AMI tag used for data reprocessing')
    parser.add_argument('--doBinomialCorrection', action='store_true',
                        help='Apply binomial correction to trigger rates when using Monte Carlo JZ slices')

    args = parser.parse_args()
    log = logging.getLogger('RatesPostProcessing')

    inputFile = ROOT.TFile(args.file, 'READ')

    metadata = getMetadata(inputFile)

    normdict = slice_dictionary(inputFile, "normalisation")

    if len(normdict) == 0 or metadata is None:
        log.error('Cannot locate the normalisation histogram(s) or the metadata in the top level of the ntuple.')
        return

    # Bins 1-3 of each normalisation histogram hold the normalisation factor,
    # the raw event count and the weighted event count for that slice suffix.
    for suffix, normHist in normdict.items():
        metadata['normalisation'+suffix] = normHist.GetBinContent(1)
        metadata['n_evts'+suffix] = normHist.GetBinContent(2)
        metadata['n_evts_weighted'+suffix] = normHist.GetBinContent(3)

    metadata['details'] = args.userDetails
    metadata['JIRA'] = args.jira
    metadata['amiTag'] = args.amiTag
    metadata['doBinomialCorrection'] = args.doBinomialCorrection

    HLTGlobalGroup = getGlobalGroup(inputFile, 'RATE_GLOBAL_HLT')
    L1GlobalGroup = getGlobalGroup(inputFile, 'RATE_GLOBAL_L1')

    L1Triggers = populateTriggers(inputFile, metadata, L1GlobalGroup, 'ChainL1')
    HLTTriggers = populateTriggers(inputFile, metadata, HLTGlobalGroup, 'ChainHLT')
    AllGlobalGroups = populateTriggers(inputFile, metadata, HLTGlobalGroup, 'Group')

    scanTriggers = populateScanTriggers(inputFile, metadata)

    if not scanTriggers and (not L1Triggers or not HLTTriggers or not AllGlobalGroups):
        log.error("Failed to populate triggers")
        return

    L1Table = getTableName("L1")
    HLTTable = getTableName("HLT")
    GroupTable = getTableName("Group")

    log.info("Exporting " + args.outputJSONFile)
    toJson(args.outputJSONFile, metadata, L1Triggers, HLTTriggers)
    log.info("Exporting " + HLTTable)
    toCSV(HLTTable, metadata, HLTTriggers)
    log.info("Exporting " + L1Table)
    toCSV(L1Table, metadata, L1Triggers)
    log.info("Exporting " + GroupTable)
    toCSV(GroupTable, metadata, AllGlobalGroups)
    log.info("Exporting scan triggers")
    toROOT(args.outputROOTFile, scanTriggers)


if __name__ == "__main__":
    main()