ATLAS Offline Software
python.RatesAnalysisOnlineProcessing Namespace Reference

Classes

class  RateEntry

Functions

 readLbStartFromCool (runNumber, lb)
 readLbEndFromCool (runNumber, lb)
 readLbFromCool (runNumber, lb)
 readChainsGroups (smkey="", dbAlias="")
 readHLTPrescales (pskey="", dbAlias="")
 readL1Prescales (pskey="", dbAlias="")
 findKeysForRange (psk, lbStart, lbEnd)
 toCSV (fileName, dirName, data)
 saveMetadata (dirName, data)
 readAvgLumi (runNumber, lbStart, lbEnd)
 main ()

Variables

 log = logging.getLogger('RatesAnalysisOnlineProcessing')

Detailed Description

@file RatesAnalysisOnlineProcessing.py
@brief Script to create summaries of online P1 rates retrieved from pbeast, both prescaled and unprescaled.
       Used for validation of the Enhanced Bias weighting.
       For pbeast authentication use: export PBEAST_SERVER_SSO_SETUP_TYPE=AutoUpdateKerberos
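Example (illustrative only): the pbeast query pattern used throughout main(). It assumes a tdaq release is set up and the SSO variable above has been exported; the server URL and timestamps below are placeholders, not a real data-taking period.

# Illustrative sketch of the libpbeastpy query pattern used in main().
# Timestamps are microseconds since the epoch (IS convention); the values
# below are placeholders.
import libpbeastpy  # provided by the tdaq release

pbeast = libpbeastpy.ServerProxy('https://atlasop.cern.ch')
startOfRange = 1700000000000000  # placeholder start timestamp [us]
endOfRange = 1700000600000000    # placeholder end timestamp [us]

# Same partition/class/attribute/object pattern as the HLT output-rate query in main()
hltRates = pbeast.get_data('ATLAS', 'HLT_Rate', 'Output', 'ISS_TRP.HLT_.*',
                           True, startOfRange, endOfRange)[0].data
for chain in hltRates:
    print(chain, len(hltRates[chain]))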

Function Documentation

◆ findKeysForRange()

python.RatesAnalysisOnlineProcessing.findKeysForRange(psk, lbStart, lbEnd)
Retrieve all prescale key combinations within the given lumiblocks

Definition at line 135 of file RatesAnalysisOnlineProcessing.py.

135def findKeysForRange(psk, lbStart, lbEnd):
136 '''
137 Retrieve all prescale key combinations within the given lumiblocks
138 '''
139 # Convert str to list of ntups
140 it = iter("".join(c for c in psk if c not in "()[] ").split(","))
141 pskList = [(int(x), int(y), int(z)) for x, y, z in zip(it, it, it)]
142
143 log.debug("Reading the key ranges for {0}".format(psk))
144
145 # COOL format of prescale entry is (key, lbStart, lbEnd)
146 foundKeys = []
147 for prescaleEntry in pskList:
148 if prescaleEntry[1] <= lbEnd and prescaleEntry[2] >= lbStart:
149 log.debug("Found prescale key in the range: {0}".format(prescaleEntry))
150 foundKeys.append(prescaleEntry)
151
152 if not foundKeys:
153 log.warning("Cannot find one prescale for lumiblocks {0} to {1}. Available values: {2}".format(lbStart, lbEnd, pskList))
154 return []
155
156 return foundKeys
157
158
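Example (standalone, with made-up prescale keys): what findKeysForRange does with the COOL-style key string. The parsing and the overlap test mirror the listing above.

# Made-up COOL-style string of (key, lbStart, lbEnd) triplets
psk = "[(23005, 1, 120), (23006, 121, 250), (23007, 251, 400)]"

# Same parsing as the listing: strip brackets/spaces, split on commas, group in threes
it = iter("".join(c for c in psk if c not in "()[] ").split(","))
pskList = [(int(x), int(y), int(z)) for x, y, z in zip(it, it, it)]

# Keep every entry that overlaps the requested lumiblock range [100, 200]
lbStart, lbEnd = 100, 200
foundKeys = [entry for entry in pskList if entry[1] <= lbEnd and entry[2] >= lbStart]
print(foundKeys)  # [(23005, 1, 120), (23006, 121, 250)]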

◆ main()

python.RatesAnalysisOnlineProcessing.main()

Definition at line 217 of file RatesAnalysisOnlineProcessing.py.

217def main():
218 from argparse import ArgumentParser
219 parser = ArgumentParser()
220 parser.add_argument('--runNumber', required=True, type=int,
221 help='Number of run to process')
222 parser.add_argument('--lbStart', required=True, type=int,
223 help='First lumiblock to record Enhanced Bias data')
224 parser.add_argument('--lbEnd', required=True, type=int,
225 help='Last lumiblock to record Enhanced Bias data')
226 parser.add_argument('-s','--server', default='https://atlasop.cern.ch',
227 help="Pbeast server url. For GPN: https://atlasop.cern.ch, for P1: http://pc-tdq-bst-05.cern.ch:8080")
228 parser.add_argument('--loglevel', type=int, default=3,
229 help='Verbosity level: 1 - VERBOSE, 2 - DEBUG, 3 - INFO')
230 args = parser.parse_args()
231 log.setLevel(args.loglevel)
232
233 startOfRange = readLbStartFromCool(args.runNumber, args.lbStart)
234 endOfRange = readLbEndFromCool(args.runNumber, args.lbEnd)
235
236 from time import ctime
237 log.info("Rates will be retrieved for lumiblocks {0}-{1}: {2} - {3}".format(args.lbStart, args.lbEnd, ctime(startOfRange/1E6), ctime(endOfRange/1E6)))
238 log.debug("Start and end timestamps: {0} {1}".format(startOfRange, endOfRange))
239
240 # Read prescales and groups to save in the csv summary
241 # The format of the configuration is defined in https://gitlab.cern.ch/atlas/athena/-/blob/main/Trigger/TrigCost/TrigCostAnalysis/python/CostMetadataUtil.py#L238
242 from TrigCostAnalysis.CostMetadataUtil import readHLTConfigKeysFromCOOL
243 configMetadata = readHLTConfigKeysFromCOOL(args.runNumber)
244
245 # The start and stop lumiblock should not include the lumiblocks where we did the keys change
246 hltAvailableKeys = findKeysForRange(configMetadata[3]["HLTPSK"], args.lbStart, args.lbEnd)
247 l1AvailableKeys = findKeysForRange(configMetadata[4]["LVL1PSK"], args.lbStart, args.lbEnd)
248
249 # Create list of key ranges - it will store tuples (l1Psk, hltPsk, rangeStart, rangeEnd)
250 # Four cases are handled: L1 range starts before HLT range and ends later, HLT range starts before L1 range and ends later,
251 # L1 range is within HLT range, and HLT range is within L1 range
252 keyRangesList = []
253 for hltEntry in hltAvailableKeys:
254 hltLbStart = hltEntry[1]
255 hltLbEnd = hltEntry[2]
256
257 for l1Entry in l1AvailableKeys:
258 l1LbStart = l1Entry[1]
259 l1LbEnd = l1Entry[2]
260
261 if (hltLbStart >= l1LbStart) and (hltLbEnd >= l1LbEnd) and (hltLbStart <= l1LbEnd):
262 keyRangesList.append((l1Entry[0], hltEntry[0], hltLbStart, l1LbEnd))
263 elif (hltLbStart <= l1LbStart) and (hltLbEnd <= l1LbEnd) and (l1LbStart <= hltLbEnd):
264 keyRangesList.append((l1Entry[0], hltEntry[0], l1LbStart, hltLbEnd))
265 elif (hltLbStart <= l1LbStart) and (hltLbEnd >= l1LbEnd):
266 keyRangesList.append((l1Entry[0], hltEntry[0], l1LbStart, l1LbEnd))
267 elif (hltLbStart >= l1LbStart) and (hltLbEnd <= l1LbEnd):
268 keyRangesList.append((l1Entry[0], hltEntry[0], hltLbStart, hltLbEnd))
269
270 log.debug("Available key ranges are {0}".format(keyRangesList))
271 chainGroups = readChainsGroups(configMetadata[2]["SMK"], configMetadata[0]["DB"])
272
273 pbeast = None
274 try:
275 import libpbeastpy
276 pbeast = libpbeastpy.ServerProxy(args.server)
277 except ImportError as e:
278 log.error("Exeption when reading the pbeast information. Remember to setup the tdaq release!\n{0}".format(e))
279 return
280
281 hltChains = {}
282 l1Items = {}
283 groups = {}
284 for keysRange in keyRangesList:
285 # Save the range start and end and make sure we are in given limits
286 lbStart = keysRange[2] if keysRange[2] > args.lbStart else args.lbStart
287 lbEnd = keysRange[3] if keysRange[3] < args.lbEnd else args.lbEnd
288 startOfKeysRange = readLbStartFromCool(args.runNumber, lbStart)
289 endOfKeysRange = readLbEndFromCool(args.runNumber, lbEnd)
290 log.debug("Current range is {0}-{1}. Timestamps are {2}-{3}".format(lbStart, lbEnd, ctime(startOfKeysRange/1E6), ctime(endOfKeysRange/1E6)))
291
292 hltPrescales = readHLTPrescales(keysRange[1], configMetadata[0]["DB"])
293 l1Prescales = readL1Prescales(keysRange[0], configMetadata[0]["DB"])
294
295 # Read prescaled IS rates from pbeast and calculate unprescaled rates
296 # Queries are based on TRP grafana dashboard
297 try:
298 hltRates = pbeast.get_data('ATLAS', 'HLT_Rate', 'Output', 'ISS_TRP.HLT_.*', True, startOfKeysRange, endOfKeysRange)[0].data
299 l1Rates = pbeast.get_data('ATLAS', 'L1_Rate', 'TAV', 'ISS_TRP.L1_.*', True, startOfKeysRange, endOfKeysRange)[0].data
300 streamRates = pbeast.get_data('ATLAS', 'HLT_Rate', 'Output', 'ISS_TRP.str_.*', True, startOfKeysRange, endOfKeysRange)[0].data
301 groupRates = pbeast.get_data('ATLAS', 'HLT_Rate', 'Output', 'ISS_TRP.grp_.*', True, startOfKeysRange, endOfKeysRange)[0].data
302 recordingRates = pbeast.get_data('ATLAS', 'SFOngCounters', 'WritingEventRate', 'DF.TopMIG-IS:HLT.Counters.*', True, startOfKeysRange, endOfKeysRange)[0].data
303 except RuntimeError as e:
304 log.error("Exception when reading the pbeast information. Remember to export the pbeast server sso!\n{0}".format(e))
305 return
306
307 for chain in hltRates:
308 chainName = chain.replace("ISS_TRP.", "")
309 if chainName not in chainGroups:
310 log.warning("Chain {0} is missing from the current menu".format(chainName))
311 continue
312
313 l1Item = "L1_" + chainName.split("L1")[1]
314 l1psk = l1Prescales[l1Item] if l1Item in l1Prescales else "-"
315 keysStr = "L1:{0} HLT:{1}".format(l1psk, hltPrescales[chainName])
316 if chainName not in hltChains:
317 hltChains[chainName] = RateEntry(chainName, chainGroups[chainName], keysStr)
318 else:
319 hltChains[chainName].appendKeys(keysStr)
320
321 for dataPoint in hltRates[chain]:
322 if dataPoint.value == 0:
323 # Skip avg rate = 0
324 continue
325 elif dataPoint.ts < startOfKeysRange or dataPoint.ts > endOfKeysRange:
326 # Skip data point outside analyzed range
327 continue
328
329 rate = dataPoint.value
330 if rate > 0 and hltPrescales[chainName] <=0:
331 log.warning("Rate for disabled chain {0} is higher than 0! {1} timestamp {2}".format(chainName, rate, ctime(dataPoint.ts/1E6)))
332
333 rateUn = rate * hltPrescales[chainName] * (l1Prescales[l1Item] if l1Item in l1Prescales else 1)
334 hltChains[chainName].appendRate(rate, rateUn)
335
336 for item in l1Rates:
337 itemName = item.replace("ISS_TRP.", "")
338 if "--enabled" not in itemName:
339 continue
340 else:
341 itemName = itemName.replace("--enabled", "")
342
343 if itemName not in l1Items:
344 l1Items[itemName] = RateEntry(itemName, "-", "L1:{0}".format(l1Prescales[itemName]))
345 else:
346 l1Items[itemName].appendKeys("L1:{0}".format(l1Prescales[itemName]))
347
348 for dataPoint in l1Rates[item]:
349 if dataPoint.value == 0:
350 continue
351 elif dataPoint.ts < startOfKeysRange or dataPoint.ts > endOfKeysRange:
352 continue
353 rate = dataPoint.value
354 if rate > 0 and l1Prescales[itemName] <=0:
355 log.warning("Rate for disabled item {0} is higher than 0! {1} timestamp {2}".format(itemName, rate, ctime(dataPoint.ts/1E6)))
356 rateUn = rate * l1Prescales[itemName]
357
358 l1Items[itemName].appendRate(rate, rateUn)
359
360 groupRates = groupRates | streamRates | recordingRates
361 for group in groupRates:
362 groupName = group.replace("ISS_TRP.", "") \
363 .replace("grp", "RATE") \
364 .replace("str", "STREAM") \
365 .replace("DF.TopMIG-IS:HLT.Counters.", "HLT_recording_")
366
367 if groupName not in groups:
368 groups[groupName] = RateEntry(groupName, "-", "Multiple")
369
370 for dataPoint in groupRates[group]:
371 if dataPoint.value == 0:
372 continue
373 elif dataPoint.ts < startOfKeysRange or dataPoint.ts > endOfKeysRange:
374 continue
375 groups[groupName].appendRate(dataPoint.value, 0)
376
377 # Save the results
378 from RatesAnalysis.Util import getTableName
379 L1Table = getTableName("L1")
380 HLTTable = getTableName("HLT")
381 GroupTable = getTableName("Group")
382
383 metadata = [
384 {'PredictionLumi' : readAvgLumi(args.runNumber, args.lbStart, args.lbEnd)},
385 {'RunNumber' : args.runNumber},
386 {"First lumiblock" : args.lbStart},
387 {"Last lumiblock" : args.lbEnd},
388 {'SMK' : configMetadata[2]["SMK"]},
389 {'DB' : configMetadata[0]["DB"]},
390 {'LVL1PSK' : l1AvailableKeys},
391 {'HLTPSK' : hltAvailableKeys}
392 ]
393
394 prescaledDirName = "costMonitoring_OnlineTRPRates-onlinePS-LB{0}-{1}_{2}/".format(args.lbStart, args.lbEnd, args.runNumber)
395 unprescaledDirName = "costMonitoring_OnlineTRPRates-noPS-LB{0}-{1}_{2}/".format(args.lbStart, args.lbEnd, args.runNumber)
396 log.info("Exporting " + HLTTable)
397 hltChainsList = [hltChains[chain].getCsvEntry() for chain in hltChains]
398 hltChainsUnpsList = [hltChains[chain].getUnprescaledCsvEntry() for chain in hltChains]
399 toCSV(HLTTable, prescaledDirName + "csv/", hltChainsList)
400 toCSV(HLTTable, unprescaledDirName + "csv/", hltChainsUnpsList)
401
402 log.info("Exporting " + L1Table)
403 l1ItemsList = [l1Items[item].getCsvEntry() for item in l1Items]
404 l1ItemsUnpsList = [l1Items[item].getUnprescaledCsvEntry() for item in l1Items]
405 toCSV(L1Table, prescaledDirName + "csv/", l1ItemsList)
406 toCSV(L1Table, unprescaledDirName + "csv/", l1ItemsUnpsList)
407
408 log.info("Exporting " + GroupTable)
409 groupsList = [groups[group].getCsvEntry() for group in groups]
410 toCSV(GroupTable, prescaledDirName + "csv/", groupsList)
411
412 prescaledMd = [*metadata, {"Details" : "Averaged rates with online prescales from online monitoring"}]
413 unprescaledMd = [*metadata, {"Details" : "Averaged rates with prescales removed from online monitoring"}]
414 saveMetadata(prescaledDirName, prescaledMd)
415 saveMetadata(unprescaledDirName, unprescaledMd)
416
417
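The four overlap cases that fill keyRangesList are equivalent to intersecting each HLT key range with each L1 key range and keeping the non-empty intersections. A standalone sketch of that equivalent computation, with made-up keys:

# Compact form of the four overlap cases in main(): keep the intersection
# [max(starts), min(ends)] whenever it is non-empty.
hltAvailableKeys = [(17000, 1, 150)]                  # made-up (HLTPSK, lbStart, lbEnd)
l1AvailableKeys = [(9000, 1, 80), (9001, 81, 150)]    # made-up (L1PSK, lbStart, lbEnd)

keyRangesList = []
for hltKey, hltLbStart, hltLbEnd in hltAvailableKeys:
    for l1Key, l1LbStart, l1LbEnd in l1AvailableKeys:
        start = max(hltLbStart, l1LbStart)
        end = min(hltLbEnd, l1LbEnd)
        if start <= end:
            keyRangesList.append((l1Key, hltKey, start, end))

print(keyRangesList)  # [(9000, 17000, 1, 80), (9001, 17000, 81, 150)]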

◆ readAvgLumi()

python.RatesAnalysisOnlineProcessing.readAvgLumi(runNumber, lbStart, lbEnd)
Read the instantaneous luminosity and calculate the average for the given run number and lumiblock range

Definition at line 191 of file RatesAnalysisOnlineProcessing.py.

191def readAvgLumi(runNumber, lbStart, lbEnd):
192 '''
193 Read the instantaneous luminosity and calculate the average for the given run number and lumiblock range
194 '''
195 from PyCool import cool
196 from DQUtils.sugar import RunLumi
197 from TrigConfStorage.TriggerCoolUtil import TriggerCoolUtil
198 db = TriggerCoolUtil.GetConnection('CONDBR2')
199 folder = db.getFolder("/TRIGGER/LUMI/OnlPrefLumi")
200 folderIterator = folder.browseObjects(RunLumi(runNumber, lbStart), RunLumi(runNumber, lbEnd), cool.ChannelSelection())
201
202 avg = 0
203 counter = 0
204 while folderIterator.goToNext():
205 payload=folderIterator.currentRef().payload()
206 avg += payload["LBAvInstLumi"]
207 counter +=1
208
209 if counter == 0:
210 log.error("No entries in COOL database, in /TRIGGER/LUMI/OnlPrefLumi folder were found for run {0}, lumiblocks {1}-{2}".format(runNumber, lbStart, lbEnd))
211 return 0
212
213 log.debug("Avg inst lumi {0} for {1} lumiblocks".format((avg/counter), counter))
214
215 return avg/counter * 1e30
216
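Hedged usage sketch, mirroring the call in main() (readAvgLumi(args.runNumber, args.lbStart, args.lbEnd)). The run number and lumiblock range are placeholders, and an Athena/Trigger setup with CONDBR2 COOL access is assumed; the 1e30 factor in the listing scales the COOL value, conventionally stored in units of 1e30 cm-2 s-1, to cm-2 s-1.

# Placeholders; assumes readAvgLumi from this module is in scope (as inside the script)
avgLumi = readAvgLumi(460000, 100, 200)
print("PredictionLumi = {0:.3e} cm-2 s-1".format(avgLumi))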

◆ readChainsGroups()

python.RatesAnalysisOnlineProcessing.readChainsGroups(smkey="", dbAlias="")
Returns a dictionary of chains and their groups based on the HLT menu

Definition at line 90 of file RatesAnalysisOnlineProcessing.py.

90def readChainsGroups(smkey="", dbAlias=""):
91 '''
92 Returns a dictionary of chains and their groups based on the HLT menu
93 '''
94 log.debug("Reading HLT menu from {0} {1}".format(smkey, dbAlias))
95 from TrigConfIO.HLTTriggerConfigAccess import HLTMenuAccess
96 hltChains = HLTMenuAccess(dbalias = dbAlias, smkey = smkey).chains()
97
98 groups = {}
99 for chainName in hltChains:
100 groups[chainName] = " ".join(hltChains[chainName]["groups"])
101
102 return groups
103
104
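Hedged usage sketch: in main() the SMK and DB alias come from readHLTConfigKeysFromCOOL(runNumber); the values below are placeholders.

# Placeholder key and alias; assumes readChainsGroups from this module is in scope
chainGroups = readChainsGroups(smkey=3000, dbAlias="TRIGGERDB_RUN3")
for chainName in list(chainGroups)[:5]:
    print(chainName, "->", chainGroups[chainName])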

◆ readHLTPrescales()

python.RatesAnalysisOnlineProcessing.readHLTPrescales(pskey="", dbAlias="")
Returns a dictionary of chains and their prescales

Definition at line 105 of file RatesAnalysisOnlineProcessing.py.

105def readHLTPrescales(pskey="", dbAlias=""):
106 '''
107 Returns a dictionary of chains and their prescales
108 '''
109 log.debug("Reading HLT prescales set from {0} {1}".format(pskey, dbAlias))
110 from TrigConfIO.HLTTriggerConfigAccess import HLTPrescalesSetAccess
111 prescalesSet = HLTPrescalesSetAccess(dbalias = dbAlias, hltpskey = pskey).prescales()
112
113 prescales = {}
114 for chainName in prescalesSet:
115 prescales[chainName] = prescalesSet[chainName]["prescale"]
116
117 return prescales
118
119

◆ readL1Prescales()

python.RatesAnalysisOnlineProcessing.readL1Prescales(pskey="", dbAlias="")
Returns a dictionary of items and their prescales

Definition at line 120 of file RatesAnalysisOnlineProcessing.py.

120def readL1Prescales(pskey="", dbAlias=""):
121 '''
122 Returns a dictionary of items and their prescales
123 '''
124 log.debug("Reading L1 prescales set from {0} {1}".format(pskey, dbAlias))
125 from TrigConfIO.L1TriggerConfigAccess import L1PrescalesSetAccess
126 prescalesAccess = L1PrescalesSetAccess(dbalias = dbAlias, l1pskey = pskey)
127
128 prescales = {}
129 for itemName in prescalesAccess.itemNames():
130 prescales[itemName] = prescalesAccess.prescale(itemName)
131
132 return prescales
133
134
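Hedged sketch of how main() combines the two prescale lookups to undo the prescaling (unprescaled rate = rate * HLT prescale * L1 prescale). The keys and DB alias below are placeholders; a chain with prescale <= 0 is treated as disabled.

# Placeholder keys/alias; assumes readHLTPrescales and readL1Prescales are in scope
hltPrescales = readHLTPrescales(pskey=17000, dbAlias="TRIGGERDB_RUN3")
l1Prescales = readL1Prescales(pskey=9000, dbAlias="TRIGGERDB_RUN3")

for chainName in list(hltPrescales)[:5]:
    # Seeding L1 item derived from the chain name, as in main()
    l1Item = "L1_" + chainName.split("L1")[1] if "L1" in chainName else None
    totalPrescale = hltPrescales[chainName] * l1Prescales.get(l1Item, 1)
    print("{0}: total prescale = {1}".format(chainName, totalPrescale))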

◆ readLbEndFromCool()

python.RatesAnalysisOnlineProcessing.readLbEndFromCool(runNumber, lb)
Returns the end of the lumiblock in the given run, in microseconds

Definition at line 62 of file RatesAnalysisOnlineProcessing.py.

62def readLbEndFromCool(runNumber, lb):
63 '''
64 Returns the end of the lumiblock in the given run, in microseconds
65 '''
66 timestamp = readLbFromCool(runNumber, lb)["EndTime"]
67
68 # Returns value UTC nanoseconds since 1970 - convert to microseconds (used by IS)
69 return int(timestamp/1000) if timestamp else -1
70
71

◆ readLbFromCool()

python.RatesAnalysisOnlineProcessing.readLbFromCool(runNumber, lb)
Returns the payload for a lumiblock in the given run, read from the COOL database
Based on TriggerCoolUtil.getRunStartTime

Definition at line 72 of file RatesAnalysisOnlineProcessing.py.

72def readLbFromCool(runNumber, lb):
73 '''
74 Returns the payload for a lumiblock in the given run, read from the COOL database
75 Based on TriggerCoolUtil.getRunStartTime
76 '''
77 from TrigConfStorage.TriggerCoolUtil import TriggerCoolUtil
78 dbconn = TriggerCoolUtil.GetConnection("CONDBR2")
79 f = dbconn.getFolder( "/TRIGGER/LUMI/LBLB" )
80
81 limmin = RunLumi(runNumber, lb)
82 limmax = RunLumi(runNumber, lb+1)
83
84 from PyCool import cool
85 objs = f.browseObjects(limmin, limmax, cool.ChannelSelection(0))
86 objs.goToNext()
87 return objs.currentRef().payload()
88
89

◆ readLbStartFromCool()

python.RatesAnalysisOnlineProcessing.readLbStartFromCool(runNumber, lb)
Returns the start of the lumiblock in the given run, in microseconds

Definition at line 52 of file RatesAnalysisOnlineProcessing.py.

52def readLbStartFromCool(runNumber, lb):
53 '''
54 Returns the start of the lumiblock in the given run, in microseconds
55 '''
56 timestamp = readLbFromCool(runNumber, lb)["StartTime"]
57
58 # Returns value UTC nanoseconds since 1970 - convert to microseconds (used by IS)
59 return int(timestamp/1000) if timestamp else -1
60
61
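Hedged usage sketch mirroring main(): the COOL timestamps come back in microseconds (the IS convention), so they are divided by 1E6 before being passed to ctime. The run number and lumiblocks below are placeholders.

from time import ctime

# Placeholders; assumes readLbStartFromCool/readLbEndFromCool are in scope
startOfRange = readLbStartFromCool(460000, 100)
endOfRange = readLbEndFromCool(460000, 200)
print("Range: {0} - {1}".format(ctime(startOfRange/1E6), ctime(endOfRange/1E6)))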

◆ saveMetadata()

python.RatesAnalysisOnlineProcessing.saveMetadata(dirName, data)
Save metadata dictionary

Definition at line 176 of file RatesAnalysisOnlineProcessing.py.

176def saveMetadata(dirName, data):
177 '''
178 Save metadata dictionary
179 '''
180
181 mdDict = {}
182 mdDict['text'] = 'metadata'
183 mdDict['children'] = data
184
185 outFilename = dirName + '/' + "metadata.json"
186 with open(outFilename, mode='w') as outputFile:
187 import json
188 json.dump(obj=mdDict, fp=outputFile, indent=2, sort_keys=True)
189
190
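Standalone sketch of the metadata.json structure written by saveMetadata; the entries mirror a shortened version of the metadata list built in main().

import json

# Shortened, made-up metadata entries in the same shape as main() builds them
data = [
    {'RunNumber': 460000},
    {'First lumiblock': 100},
    {'Last lumiblock': 200},
]
print(json.dumps({'text': 'metadata', 'children': data}, indent=2, sort_keys=True))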

◆ toCSV()

python.RatesAnalysisOnlineProcessing.toCSV(fileName, dirName, data)
Save rates to csv file

Definition at line 159 of file RatesAnalysisOnlineProcessing.py.

159def toCSV(fileName, dirName, data):
160 '''
161 Save rates to csv file
162 '''
163 import csv, os
164
165 os.makedirs(dirName, exist_ok=True)
166 with open(dirName + '/' + fileName, mode='w') as outputFile:
167 ratesWriter = csv.writer(outputFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
168
169 ratesWriter.writerow(['Name','Group','Rate [Hz]','Rate Err [Hz]', 'Prescale'])
170 ratesWriter.writerow(['Trigger name','The group this chain belongs to','Online rate','Error on rate','The prescale of this chain. Only displayed for simple combinations'])
171
172 for trig in data:
173 ratesWriter.writerow(trig)
174
175
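Standalone sketch of the CSV layout produced by toCSV: a column-name row, a description row, then one row per trigger. The data row below is made up.

import csv, sys

# Made-up example row: name, group, rate, rate error, prescale string
data = [["HLT_example_chain_L1EM22VHI", "RATE:ExampleGroup", 12.3, 0.4, "L1:1.0 HLT:1.0"]]

ratesWriter = csv.writer(sys.stdout, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
ratesWriter.writerow(['Name', 'Group', 'Rate [Hz]', 'Rate Err [Hz]', 'Prescale'])
ratesWriter.writerow(['Trigger name', 'The group this chain belongs to', 'Online rate',
                      'Error on rate', 'The prescale of this chain. Only displayed for simple combinations'])
for trig in data:
    ratesWriter.writerow(trig)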

Variable Documentation

◆ log

python.RatesAnalysisOnlineProcessing.log = logging.getLogger('RatesAnalysisOnlineProcessing')

Definition at line 15 of file RatesAnalysisOnlineProcessing.py.