@file RatesAnalysisOnlineProcessing.py
@brief Script to create summaries with online P1 rates retrieved from pbeast, prescaled and unprescaled.
       Used for validation of Enhanced Bias weighting.
       For the pbeast authentication use: export PBEAST_SERVER_SSO_SETUP_TYPE=AutoUpdateKerberos
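
# Example invocation (a sketch; the run number and lumiblock range below are
# illustrative values, not taken from this file):
#   export PBEAST_SERVER_SSO_SETUP_TYPE=AutoUpdateKerberos
#   python RatesAnalysisOnlineProcessing.py --runNumber 453530 --lbStart 100 --lbEnd 120
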
from DQUtils.sugar import RunLumi

from AthenaCommon.Logging import logging
log = logging.getLogger('RatesAnalysisOnlineProcessing')
 
    Returns start of the lumiblock from given run in microseconds
    return int(timestamp/1000) if timestamp else -1
 
    Returns end of the lumiblock from given run in microseconds
    return int(timestamp/1000) if timestamp else -1
 
    Returns payload for a lumiblock from given run read from COOL database
    Based on TriggerCoolUtil.getRunStartTime
    from TrigConfStorage.TriggerCoolUtil import TriggerCoolUtil
    dbconn = TriggerCoolUtil.GetConnection("CONDBR2")
    f = dbconn.getFolder("/TRIGGER/LUMI/LBLB")

    limmin = RunLumi(runNumber, lb)
    limmax = RunLumi(runNumber, lb+1)

    from PyCool import cool
    objs = f.browseObjects(limmin, limmax, cool.ChannelSelection(0))

    return objs.currentRef().payload()
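
# Minimal usage sketch for the payload returned above. It assumes the standard
# StartTime/EndTime fields of the /TRIGGER/LUMI/LBLB folder (timestamps in ns),
# which the microsecond helpers earlier in this file divide by 1000:
#   payload = <payload returned above>
#   startTimestampUs = int(payload["StartTime"]/1000)
#   endTimestampUs   = int(payload["EndTime"]/1000)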
 
    Returns dictionary of chain and its groups based on HLT Menu
    log.debug("Reading HLT menu from {0} {1}".format(smkey, dbAlias))
    from TrigConfIO.HLTTriggerConfigAccess import HLTMenuAccess
    hltChains = HLTMenuAccess(dbalias = dbAlias, smkey = smkey).chains()

    for chainName in hltChains:
        groups[chainName] = " ".join(hltChains[chainName]["groups"])
 
    Returns dictionary of chain and its prescale
    log.debug("Reading HLT prescales set from {0} {1}".format(pskey, dbAlias))
    from TrigConfIO.HLTTriggerConfigAccess import HLTPrescalesSetAccess
    prescalesSet = HLTPrescalesSetAccess(dbalias = dbAlias, hltpskey = pskey).prescales()

    for chainName in prescalesSet:
        prescales[chainName] = prescalesSet[chainName]["prescale"]
 
    Returns dictionary of item and its prescale
    log.debug("Reading L1 prescales set from {0} {1}".format(pskey, dbAlias))
    from TrigConfIO.L1TriggerConfigAccess import L1PrescalesSetAccess
    prescalesAccess = L1PrescalesSetAccess(dbalias = dbAlias, l1pskey = pskey)

    for itemName in prescalesAccess.itemNames():
        prescales[itemName] = prescalesAccess.prescale(itemName)
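
# Note: further down in the script a prescale <= 0 is treated as a disabled
# chain/item (a warning is printed if such a trigger still shows a non-zero rate),
# so the dictionaries built here may also contain non-positive prescale values.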
 
    Retrieve all prescale key combinations within given lumiblocks
    it = iter("".join(c for c in psk if c not in "()[] ").split(","))
    pskList = [(int(x), int(y), int(z)) for x, y, z in zip(it, it, it)]
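
    # Parsing sketch (illustrative input, not taken from a real run): a key string such as
    #   "[(17042, 1, 120), (17043, 121, 300)]"
    # is stripped of brackets and spaces, split on commas and regrouped into triples of
    # (prescale key, first lumiblock, last lumiblock):
    #   [(17042, 1, 120), (17043, 121, 300)]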
 
    log.debug("Reading the key ranges for {0}".format(psk))

    for prescaleEntry in pskList:
        if prescaleEntry[1] <= lbEnd and prescaleEntry[2] >= lbStart:
            log.debug("Found prescale key in the range: {0}".format(prescaleEntry))
            foundKeys.append(prescaleEntry)

    if not foundKeys:
        log.warning("Cannot find one prescale for lumiblocks {0} to {1}. Available values: {2}".format(lbStart, lbEnd, pskList))
 
    Save rates to csv file
    os.makedirs(dirName, exist_ok=True)

    with open(dirName + '/' + fileName, mode='w') as outputFile:
        ratesWriter = csv.writer(outputFile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)

        ratesWriter.writerow(['Name','Group','Rate [Hz]','Rate Err [Hz]', 'Prescale'])
        ratesWriter.writerow(['Trigger name','The group this chain belongs to','Online rate','Error on rate','The prescale of this chain. Only displayed for simple combinations'])

            ratesWriter.writerow(trig)
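
# The resulting CSV starts with a header row and a description row, followed by
# one row per trigger, e.g. (illustrative trigger name and numbers):
#   Name,Group,Rate [Hz],Rate Err [Hz],Prescale
#   HLT_mu24_L1MU14FCH,Single:Muon,42.5,0.3,L1:1 HLT:1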
 
    Save metadata dictionary
    mdDict['text'] = 'metadata'
    mdDict['children'] = data

    outFilename = dirName + '/' + "metadata.json"
    with open(outFilename, mode='w') as outputFile:
        json.dump(obj=mdDict, fp=outputFile, indent=2, sort_keys=True)
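
# Sketch of the resulting metadata.json layout (illustrative values), based on the
# dictionary filled above and the metadata list assembled in the main section below:
#   {
#     "children": [
#       {"PredictionLumi": 1.8e+34},
#       {"RunNumber": 453530},
#       ...
#     ],
#     "text": "metadata"
#   }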
 
    Read Inst Lumi and calculate the average for given run number and lumiblock range
    from PyCool import cool
    from DQUtils.sugar import RunLumi
    from TrigConfStorage.TriggerCoolUtil import TriggerCoolUtil

    db = TriggerCoolUtil.GetConnection('CONDBR2')
    folder = db.getFolder("/TRIGGER/LUMI/OnlPrefLumi")
    folderIterator = folder.browseObjects(RunLumi(runNumber, lbStart), RunLumi(runNumber, lbEnd), cool.ChannelSelection())

    while folderIterator.goToNext():
        payload = folderIterator.currentRef().payload()
        avg += payload["LBAvInstLumi"]
        counter += 1

    if counter == 0:
        log.error("No entries in COOL database, in /TRIGGER/LUMI/OnlPrefLumi folder were found for run {0}, lumiblocks {1}-{2}".format(runNumber, lbStart, lbEnd))

    log.debug("Avg inst lumi {0} for {1} events".format((avg/counter), counter))

    # LBAvInstLumi is stored in units of 1e30 cm-2 s-1; scale the average to cm-2 s-1
    return avg/counter * 1e30
 
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--runNumber', required=True, type=int,
                        help='Number of run to process')
    parser.add_argument('--lbStart', required=True, type=int,
                        help='First lumiblock to record Enhanced Bias data')
    parser.add_argument('--lbEnd', required=True, type=int,
                        help='Last lumiblock to record Enhanced Bias data')
    parser.add_argument('-s','--server', default='https://atlasop.cern.ch',
                        help="Pbeast server url. For GPN: https://atlasop.cern.ch, for P1: http://pc-tdq-bst-05.cern.ch:8080")
    parser.add_argument('--loglevel', type=int, default=3,
                        help='Verbosity level: 1 - VERBOSE, 2 - DEBUG, 3 - INFO')
    args = parser.parse_args()
    log.setLevel(args.loglevel)
 
    from time import ctime
    log.info("Rates will be retrieved for lumiblocks {0}-{1}: {2} - {3}".format(args.lbStart, args.lbEnd, ctime(startOfRange/1E6), ctime(endOfRange/1E6)))
    log.debug("Start and end timestamps: {0} {1}".format(startOfRange, endOfRange))
 
    from TrigCostAnalysis.CostMetadataUtil import readHLTConfigKeysFromCOOL

    hltAvailableKeys = findKeysForRange(configMetadata[3]["HLTPSK"], args.lbStart, args.lbEnd)
    l1AvailableKeys = findKeysForRange(configMetadata[4]["LVL1PSK"], args.lbStart, args.lbEnd)
 
    for hltEntry in hltAvailableKeys:
        hltLbStart = hltEntry[1]
        hltLbEnd = hltEntry[2]

        for l1Entry in l1AvailableKeys:
            l1LbStart = l1Entry[1]
            l1LbEnd = l1Entry[2]

            if (hltLbStart >= l1LbStart) and (hltLbEnd >= l1LbEnd) and (hltLbStart <= l1LbEnd):
                keyRangesList.append((l1Entry[0], hltEntry[0], hltLbStart, l1LbEnd))
            elif (hltLbStart <= l1LbStart) and (hltLbEnd <= l1LbEnd) and (l1LbStart <= hltLbEnd):
                keyRangesList.append((l1Entry[0], hltEntry[0], l1LbStart, hltLbEnd))
            elif (hltLbStart <= l1LbStart) and (hltLbEnd >= l1LbEnd):
                keyRangesList.append((l1Entry[0], hltEntry[0], l1LbStart, l1LbEnd))
            elif (hltLbStart >= l1LbStart) and (hltLbEnd <= l1LbEnd):
                keyRangesList.append((l1Entry[0], hltEntry[0], hltLbStart, hltLbEnd))
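
            # The four branches above amount to intersecting the HLT and L1 key validity
            # ranges whenever they overlap. An equivalent, more compact sketch (not part
            # of the original script) would be:
            #   if hltLbStart <= l1LbEnd and l1LbStart <= hltLbEnd:
            #       keyRangesList.append((l1Entry[0], hltEntry[0],
            #                             max(hltLbStart, l1LbStart), min(hltLbEnd, l1LbEnd)))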
 
    log.debug("Available key ranges are {0}".format(keyRangesList))
 
    chainGroups = readChainsGroups(configMetadata[2]["SMK"], configMetadata[0]["DB"])
 
    try:
        import libpbeastpy
        pbeast = libpbeastpy.ServerProxy(args.server)
    except ImportError as e:
        log.error("Exception when reading the pbeast information. Remember to set up the tdaq release!\n{0}".format(e))
 
    for keysRange in keyRangesList:
        lbStart = keysRange[2] if keysRange[2] > args.lbStart else args.lbStart
        lbEnd = keysRange[3] if keysRange[3] < args.lbEnd else args.lbEnd

        log.debug("Current range is {0}-{1}. Timestamps are {2}-{3}".format(lbStart, lbEnd, ctime(startOfKeysRange/1E6), ctime(endOfKeysRange/1E6)))
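
        # startOfKeysRange/endOfKeysRange are not computed in this excerpt; from their use
        # as pbeast query bounds and the ctime(.../1E6) calls they are presumably the
        # start/end timestamps (in microseconds) of the clamped lumiblock range, as
        # returned by the lumiblock time helpers defined earlier in this file.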
 
        try:
            hltRates = pbeast.get_data('ATLAS', 'HLT_Rate', 'Output', 'ISS_TRP.HLT_.*', True, startOfKeysRange, endOfKeysRange)[0].data
            l1Rates = pbeast.get_data('ATLAS', 'L1_Rate', 'TAV', 'ISS_TRP.L1_.*', True, startOfKeysRange, endOfKeysRange)[0].data
            streamRates = pbeast.get_data('ATLAS', 'HLT_Rate', 'Output', 'ISS_TRP.str_.*', True, startOfKeysRange, endOfKeysRange)[0].data
            groupRates = pbeast.get_data('ATLAS', 'HLT_Rate', 'Output', 'ISS_TRP.grp_.*', True, startOfKeysRange, endOfKeysRange)[0].data
            recordingRates = pbeast.get_data('ATLAS', 'SFOngCounters', 'WritingEventRate', 'DF.TopMIG-IS:HLT.Counters.*', True, startOfKeysRange, endOfKeysRange)[0].data
        except RuntimeError as e:
            log.error("Exception when reading the pbeast information. Remember to export the pbeast server sso!\n{0}".format(e))
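
        # Reading of the get_data arguments above (an interpretation of how they are used
        # here, not documentation of libpbeastpy): the partition name, the IS class, the
        # attribute, an object-name regular expression, a flag enabling regex matching,
        # and the query window start/end timestamps in microseconds. Each call returns
        # per-object time series, accessed via [0].data as an {object name: data points}
        # mapping, which the loops below iterate over.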
 
        for chain in hltRates:
            chainName = chain.replace("ISS_TRP.", "")
            if chainName not in chainGroups:
                log.warning("Chain {0} is missing from the current menu".format(chainName))
                continue

            l1Item = "L1_" + chainName.split("L1")[1]
            l1psk = l1Prescales[l1Item] if l1Item in l1Prescales else "-"
            keysStr = "L1:{0} HLT:{1}".format(l1psk, hltPrescales[chainName])

            if chainName not in hltChains:
                hltChains[chainName] = RateEntry(chainName, chainGroups[chainName], keysStr)
            else:
                hltChains[chainName].appendKeys(keysStr)

            for dataPoint in hltRates[chain]:
                if dataPoint.value == 0:
                    continue
                elif dataPoint.ts < startOfKeysRange or dataPoint.ts > endOfKeysRange:
                    continue

                rate = dataPoint.value
                if rate > 0 and hltPrescales[chainName] <= 0:
                    log.warning("Rate for disabled chain {0} is higher than 0! {1} timestamp {2}".format(chainName, rate, ctime(dataPoint.ts/1E6)))

                # Unprescaled rate: undo the HLT prescale and, when the seeding L1 item is known, the L1 prescale
                rateUn = rate * hltPrescales[chainName] * (l1Prescales[l1Item] if l1Item in l1Prescales else 1)
                hltChains[chainName].appendRate(rate, rateUn)
 
        for item in l1Rates:
            itemName = item.replace("ISS_TRP.", "")
            if "--enabled" not in itemName:
                continue
            else:
                itemName = itemName.replace("--enabled", "")

            if itemName not in l1Items:
                l1Items[itemName] = RateEntry(itemName, "-", "L1:{0}".format(l1Prescales[itemName]))
            else:
                l1Items[itemName].appendKeys("L1:{0}".format(l1Prescales[itemName]))

            for dataPoint in l1Rates[item]:
                if dataPoint.value == 0:
                    continue
                elif dataPoint.ts < startOfKeysRange or dataPoint.ts > endOfKeysRange:
                    continue

                rate = dataPoint.value
                if rate > 0 and l1Prescales[itemName] <= 0:
                    log.warning("Rate for disabled item {0} is higher than 0! {1} timestamp {2}".format(itemName, rate, ctime(dataPoint.ts/1E6)))

                rateUn = rate * l1Prescales[itemName]
                l1Items[itemName].appendRate(rate, rateUn)
 
        groupRates = groupRates | streamRates | recordingRates

        for group in groupRates:
            groupName = group.replace("ISS_TRP.", "") \
                            .replace("DF.TopMIG-IS:HLT.Counters.", "HLT_recording_")

            if groupName not in groups:
                groups[groupName] = RateEntry(groupName, "-", "Multiple")

            for dataPoint in groupRates[group]:
                if dataPoint.value == 0:
                    continue
                elif dataPoint.ts < startOfKeysRange or dataPoint.ts > endOfKeysRange:
                    continue

                groups[groupName].appendRate(dataPoint.value, 0)
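
    # RateEntry is used throughout the loops above but is not shown in this excerpt.
    # The following is a minimal sketch of an object with the same interface
    # (constructor plus appendKeys/appendRate/getCsvEntry/getUnprescaledCsvEntry),
    # assuming it averages the collected data points; it is an illustration under that
    # assumption, not the actual implementation used by this script.
    #
    # class RateEntry:
    #     def __init__(self, name, group, keys):
    #         self.name = name
    #         self.group = group
    #         self.keys = keys
    #         self.rates = []            # prescaled (online) rates
    #         self.unprescaledRates = [] # rates with prescales removed
    #
    #     def appendKeys(self, keys):
    #         # Remember every prescale-key combination seen for this trigger
    #         if keys not in self.keys:
    #             self.keys += " " + keys
    #
    #     def appendRate(self, rate, unprescaledRate):
    #         self.rates.append(rate)
    #         self.unprescaledRates.append(unprescaledRate)
    #
    #     def _avgAndErr(self, values):
    #         from statistics import mean, pstdev
    #         avg = mean(values) if values else 0.
    #         err = pstdev(values)/len(values)**0.5 if values else 0.
    #         return avg, err
    #
    #     def getCsvEntry(self):
    #         avg, err = self._avgAndErr(self.rates)
    #         return [self.name, self.group, avg, err, self.keys]
    #
    #     def getUnprescaledCsvEntry(self):
    #         avg, err = self._avgAndErr(self.unprescaledRates)
    #         return [self.name, self.group, avg, err, self.keys]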
 
    from RatesAnalysis.Util import getTableName
 
    metadata = [
        {'PredictionLumi' : readAvgLumi(args.runNumber, args.lbStart, args.lbEnd)},
        {'RunNumber' : args.runNumber},
        {"First lumiblock" : args.lbStart},
        {"Last lumiblock" : args.lbEnd},
        {'SMK' : configMetadata[2]["SMK"]},
        {'DB' : configMetadata[0]["DB"]},
        {'LVL1PSK' : l1AvailableKeys},
        {'HLTPSK' : hltAvailableKeys}
    ]
 
    prescaledDirName = "costMonitoring_OnlineTRPRates-onlinePS-LB{0}-{1}_{2}/".format(args.lbStart, args.lbEnd, args.runNumber)
    unprescaledDirName = "costMonitoring_OnlineTRPRates-noPS-LB{0}-{1}_{2}/".format(args.lbStart, args.lbEnd, args.runNumber)

    log.info("Exporting " + HLTTable)
    hltChainsList = [hltChains[chain].getCsvEntry() for chain in hltChains]
    hltChainsUnpsList = [hltChains[chain].getUnprescaledCsvEntry() for chain in hltChains]
    toCSV(HLTTable, prescaledDirName + "csv/", hltChainsList)
    toCSV(HLTTable, unprescaledDirName + "csv/", hltChainsUnpsList)

    log.info("Exporting " + L1Table)
    l1ItemsList = [l1Items[item].getCsvEntry() for item in l1Items]
    l1ItemsUnpsList = [l1Items[item].getUnprescaledCsvEntry() for item in l1Items]
    toCSV(L1Table, prescaledDirName + "csv/", l1ItemsList)
    toCSV(L1Table, unprescaledDirName + "csv/", l1ItemsUnpsList)

    log.info("Exporting " + GroupTable)
    groupsList = [groups[group].getCsvEntry() for group in groups]
    toCSV(GroupTable, prescaledDirName + "csv/", groupsList)

    prescaledMd = [*metadata, {"Details" : "Averaged rates with online prescales from online monitoring"}]
    unprescaledMd = [*metadata, {"Details" : "Averaged rates with prescales removed from online monitoring"}]
if __name__ == "__main__":