from copy import deepcopy
from math import ceil

from AthenaCommon.Logging import logging
from AthenaConfiguration.AutoConfigFlags import GetFileMD
def pileupInputCollections(inputFiles):
    """Return the list of collection names present in the pile-up input files.

    Reads the 'itemList' file metadata via GetFileMD and keeps only the
    primary collection names, dropping auxiliary-store entries (ending in
    'Aux.').

    inputFiles -- list of input file names; an empty list yields [].
    """
    if not len(inputFiles):
        return []  # no inputs, nothing to inspect
    # itemList entries are (type, key) pairs; only the keys are needed
    rawCollections = [type_key[1] for type_key in GetFileMD(inputFiles).get("itemList", [])]
    collections = [col for col in rawCollections if not col.endswith('Aux.')]
    return collections
def pileUpCalc(nSignalEvts, refreshRate, nSubEvtPerBunch, nBunches):
    """Return the total number of needed background events.

    nSignalEvts     -- number of signal events in the job
    refreshRate     -- fraction of the background cache refreshed per signal event
    nSubEvtPerBunch -- background events overlaid per bunch crossing
    nBunches        -- number of bunch crossings simulated per signal event
    """
    # events needed to fill all bunch crossings once...
    totalSubEvts = nBunches * nSubEvtPerBunch
    # ...plus the refreshed fraction for every signal event
    totalSubEvts += totalSubEvts * refreshRate * nSignalEvts
    # restored: the mangled source dropped this return statement
    return totalSubEvts
def getNBkgEventsPerFile(initialList, logger):
    """Get number of events in a PU file.

    Tries to read the 'nentries' metadata of initialList[0]; on any failure
    falls back to an assumed 5000 events per file.

    initialList -- background input file names (only element [0] is read)
    logger      -- logger used for diagnostics
    """
    nBkgEventsPerFile = 5000  # fallback when metadata cannot be read
    try:
        from PyUtils.MetaReader import read_metadata
        metadata = read_metadata(initialList[0])
        metadata = metadata[initialList[0]]  # read_metadata keys results by file name
        nBkgEventsPerFile = int(metadata['nentries'])
        logger.debug('{} -> __Test__001__:\n{}'.format(__file__, nBkgEventsPerFile))
        logger.info('Number of background events per file (read from file) = %s.', nBkgEventsPerFile)
    except Exception:
        # best-effort: keep the 5000 default rather than failing the job here
        logger.warning('Failed to count the number of background events in %s.'
                       'Assuming 5000 - if this is an overestimate the job may die.', initialList[0])
    return nBkgEventsPerFile
def getInputCollectionOffset(flags, initialList):
    """Calculate random offset into the input PU files.

    flags       -- configuration flags (reads Input.JobNumber)
    initialList -- background input file names

    Returns 0 when no valid job number is set.
    """
    logger = logging.getLogger("PileUp")

    offsetrnd = 0
    if flags.Input.JobNumber >= 0:
        nBkgEventsPerFile = getNBkgEventsPerFile(initialList, logger)
        # Scramble the job number into a reproducible pseudo-random offset
        # (xor-shift mixing), wrapped to the total number of available events.
        offsetrnd = int(flags.Input.JobNumber + nBkgEventsPerFile * len(initialList))
        offsetrnd = offsetrnd ^ (offsetrnd << 13)
        offsetrnd = offsetrnd ^ (offsetrnd >> 17)
        offsetrnd = offsetrnd ^ (offsetrnd << 15)
        offsetrnd = offsetrnd % (nBkgEventsPerFile * len(initialList))
        logger.info('Event offset into the collection = %s', offsetrnd)
    return offsetrnd
def generateBackgroundInputCollections(flags, initialList, nBkgEvtsPerCrossing, correctForEmptyBunchCrossings):
    """Preparing the list of required input PU files.

    Repeats initialList enough times that the job has at least the required
    number of background events available, and returns the expanded list.

    flags               -- configuration flags (Exec, Input, Digitization.PU, Beam)
    initialList         -- background input file names before expansion
    nBkgEvtsPerCrossing -- background events overlaid per bunch crossing
    correctForEmptyBunchCrossings -- rescale to account for non-colliding bunches
    """
    logger = logging.getLogger("PileUp")

    finalList = []

    # Number of signal events: from Exec.MaxEvents if set, otherwise counted
    # from the input-file metadata.
    nSignalEvts = 1000
    if flags.Exec.MaxEvents > 0:
        nSignalEvts = int(flags.Exec.MaxEvents)
        logger.info('Number of signal events (from Exec.MaxEvents) = %s.', nSignalEvts)
    else:
        nSignalEvts = 0
        from PyUtils.MetaReader import read_metadata
        for inFile in list(flags.Input.Files):
            try:
                metadata = read_metadata(inFile)
                metadata = metadata[inFile]  # read_metadata keys results by file name
                nSignalEvts += int(metadata['nentries'])
                logger.debug('{} -> __Test__001__:\n{}'.format(__file__, nSignalEvts))
            except Exception as err:
                # best-effort counting: skip unreadable files
                logger.warning("Unable to open file [%s]", inFile)
                logger.warning('caught:\n%s', err)
        logger.info('Number of signal events (read from files) = %s.', nSignalEvts)

    nBkgEventsPerFile = getNBkgEventsPerFile(initialList, logger)

    # Total number of bunch crossings simulated per signal event
    nBunchesTotal = int(1 + flags.Digitization.PU.FinalBunchCrossing - flags.Digitization.PU.InitialBunchCrossing)
    nBunches = nBunchesTotal
    if correctForEmptyBunchCrossings:
        # rescale by the ratio of PU bunch spacing to beam bunch spacing
        nBunches = int(ceil(float(nBunches) * float(flags.Digitization.PU.BunchSpacing)) / float(flags.Beam.BunchSpacing))
    logger.info('Simulating a maximum of %s colliding-bunch crossings (%s colliding+non-colliding total) per signal event', nBunches, nBunchesTotal)

    # refresh rate 1.0: full background refresh per signal event -- TODO confirm
    # against the original (this call was dropped by the mangling)
    nBkgEventsForJob = pileUpCalc(float(nSignalEvts), 1.0, float(nBkgEvtsPerCrossing), nBunches)
    # add the configured offset so enough events remain after skipping into the collection
    eventOffset = flags.Digitization.PU.HighPtMinBiasInputColOffset if flags.Digitization.PU.HighPtMinBiasInputColOffset > 0 else 0
    nBkgEventsForJob += eventOffset
    logger.info('Number of background events required: %s, including %s for the offset. Number of background events in input files: %s',
                nBkgEventsForJob, eventOffset, (nBkgEventsPerFile * len(initialList)))
    numberOfRepetitionsRequiredTmp = float(nBkgEventsForJob) / float(nBkgEventsPerFile * len(initialList))
    numberOfRepetitionsRequired = 1 + int(ceil(numberOfRepetitionsRequiredTmp))
    for _ in range(0, numberOfRepetitionsRequired):
        finalList += initialList
    logger.info('Expanding input list from %s to %s',
                len(initialList), len(finalList))
    return finalList
def loadPileUpProfile(flags, fragment_string):
    """Load pile-up profile from file.

    fragment_string -- dotted module path ('Package.Module'); the module must
                       provide a setupProfile(flags) callable.

    Raises ValueError if fragment_string is not of the form Package.Module.
    """
    parts = fragment_string.split('.')
    if len(parts) < 2:
        raise ValueError('Pile-up profile configuration should be of the form Package.Module')
    from importlib import import_module
    loaded_module = import_module(fragment_string)
    return loaded_module.setupProfile(flags)
def generatePileUpProfile(flags,
                          profile,
                          randomMuSampling=False,
                          sequentialEventNumbers=False,
                          doNotCorrectMaxEvents=False):
    """Generate pile-up profile.

    Builds the run/lumi fragment from the given profile module and stores it
    in flags.Input.RunAndLumiOverrideList.

    flags                  -- configuration flags (Input, Exec, ExecutorSplitting)
    profile                -- dotted module path of the profile ('Package.Module')
    randomMuSampling       -- sample mu values randomly from the profile
    sequentialEventNumbers -- use sequential event numbers
    doNotCorrectMaxEvents  -- skip rounding maxEvents up to the nearest 100

    Raises ValueError when Exec.MaxEvents is -1.
    """
    logger = logging.getLogger("PileUp")
    logger.info('Doing RunLumiOverride configuration from file.')

    jobNumber = flags.Input.JobNumber
    maxEvents = flags.Exec.MaxEvents
    totalEvents = flags.Exec.MaxEvents
    skipEvents = flags.Exec.SkipEvents

    # executor splitting: the full task may be split into several steps
    if flags.ExecutorSplitting.TotalSteps > 1:
        totalEvents = flags.ExecutorSplitting.TotalEvents

    if maxEvents == -1:
        raise ValueError("maxEvents = -1 is not supported! Please set this to the number of events per file times the number of files per job.")
    if not doNotCorrectMaxEvents and not flags.ExecutorSplitting.TotalSteps > 1:
        # round up to the nearest 100 events for production bookkeeping
        corrMaxEvents = ceil(float(maxEvents) / 100.0) * 100.0
    else:
        if not flags.ExecutorSplitting.TotalSteps > 1:
            logger.warning(
                "Using the actual number of HITS input events for this job -- not for production use!")
        corrMaxEvents = maxEvents

    generatedProfile = loadPileUpProfile(flags, profile)
    # keep only the lumiblocks belonging to this executor step
    if flags.ExecutorSplitting.TotalSteps > 1:
        generatedProfile = list(filter(lambda lb: 'step' not in lb or lb['step'] == flags.ExecutorSplitting.Step, generatedProfile))

    runMaxEvents = sum(lb["evts"] for lb in generatedProfile)
    logger.info("There are %d events in this run.", runMaxEvents)
    jobsPerRun = int(ceil(float(runMaxEvents) / corrMaxEvents))
    logger.info("Assuming there are usually %d events per job. (Based on %d events in this job.)",
                corrMaxEvents, maxEvents)
    logger.info("There must be %d jobs per run.", jobsPerRun)

    if sequentialEventNumbers:
        logger.info("All event numbers will be sequential.")

    # pick the appropriate fragment generator
    if randomMuSampling:
        logger.info("Mu values will be sampled randomly from the set profile.")
        from RunDependentSimComps.RunDependentMCTaskIterator import getRandomlySampledRunLumiInfoFragment
        fragment = getRandomlySampledRunLumiInfoFragment(
            jobnumber=(jobNumber - 1),
            task=generatedProfile,
            maxEvents=maxEvents,
            totalEvents=totalEvents,
            skipEvents=skipEvents,
            sequentialEventNumbers=sequentialEventNumbers)
    else:
        from RunDependentSimComps.RunDependentMCTaskIterator import getRunLumiInfoFragment
        fragment = getRunLumiInfoFragment(
            jobnumber=(jobNumber - 1),
            task=generatedProfile,
            maxEvents=maxEvents,
            totalEvents=totalEvents,
            skipEvents=skipEvents,
            sequentialEventNumbers=sequentialEventNumbers)

    # drop lumiblocks that would contribute no events
    for element in fragment:
        if element['evts'] == 0:
            logger.warning('Found lumiblock with no events! This lumiblock will not be used:\n (' + element.__str__() + ')')
    fragment = [x for x in fragment if x['evts'] != 0]

    from RunDependentSimComps.RunLumiConfigTools import condenseRunLumiInfoFragment
    logger.info("Writing RunDMC trigger configuration fragment to file. listOfRunsEvents = %s",
                condenseRunLumiInfoFragment(fragment, "RunDMCTriggerRunsInfo.py"))

    flags.Input.RunAndLumiOverrideList = fragment
def generateRunAndLumiProfile(flags,
                              runLumi,
                              sequentialEventNumbers=False,
                              doNotCorrectMaxEvents=False):
    """Generate RunAndLumiOverrideList.

    Loads the profile module, merges consecutive entries belonging to the
    same step while scaling their event counts to this job's length, and
    stores the resulting fragment (sorted by step) in
    flags.Input.RunAndLumiOverrideList.

    flags                  -- configuration flags (Input, Exec, ExecutorSplitting)
    runLumi                -- dotted module path of the profile ('Package.Module')
    sequentialEventNumbers -- use sequential event numbers
    doNotCorrectMaxEvents  -- skip rounding maxEvents up to the nearest 25

    Raises SystemExit when Exec.MaxEvents is -1.
    """
    logger = logging.getLogger("PileUp")
    logger.info('Doing RunLumiOverride configuration from file.')

    jobNumber = flags.Input.JobNumber
    maxEvents = flags.Exec.MaxEvents
    totalEvents = flags.Exec.MaxEvents
    skipEvents = flags.Exec.SkipEvents

    # executor splitting: the full task may be split into several steps
    if flags.ExecutorSplitting.TotalSteps > 1:
        totalEvents = flags.ExecutorSplitting.TotalEvents

    if maxEvents == -1:
        errorMessage = "maxEvents = -1 is not supported! Please set this to the number of events per file times the number of files per job."
        raise SystemExit(errorMessage)
    if not doNotCorrectMaxEvents and not flags.ExecutorSplitting.TotalSteps > 1:
        # round up to the nearest 25 events for production bookkeeping
        corrMaxEvents = ceil(float(maxEvents) / 25.0) * 25.0
    else:
        logger.warning(
            "Using the actual number of EVNT input events for this job -- not for production use!")
        corrMaxEvents = maxEvents

    tempProfile = loadPileUpProfile(flags, runLumi)
    profileTotalEvents = sum(lb['evts'] for lb in tempProfile)
    corrTotalEvents = max(maxEvents, 50)
    # scale factor between the profile's total statistics and this job
    scaleTaskLengthSim = float(corrTotalEvents) / float(profileTotalEvents)

    generatedProfile = []
    cacheElement = None
    step = -1

    def simEvts(x):
        # scale a profile event count to this job's length
        return int(scaleTaskLengthSim * x)

    # merge consecutive profile entries of the same step, scaling event counts
    for el in tempProfile:
        if el['step'] != step:
            if cacheElement is not None:
                cacheElement['evts'] = simEvts(cacheElement['evts'])
                generatedProfile += [cacheElement]
            step = el['step']
            cacheElement = deepcopy(el)
            # NOTE(review): 'mu' is overwritten with the step index here, as in
            # the original -- confirm intent
            cacheElement['mu'] = step
        else:
            cacheElement['evts'] += el['evts']
    # flush the final cached element
    cacheElement['evts'] = simEvts(cacheElement['evts'])
    generatedProfile += [cacheElement]

    runMaxEvents = sum(lb["evts"] for lb in generatedProfile)
    logger.info("There are %d events in this run.", runMaxEvents)
    jobsPerRun = int(ceil(float(runMaxEvents) / corrMaxEvents))
    logger.info("Assuming there are usually %d events per job. (Based on %d events in this job.)",
                corrMaxEvents, maxEvents)
    logger.info("There must be %d jobs per run.", jobsPerRun)

    if sequentialEventNumbers:
        logger.info("All event numbers will be sequential.")

    from RunDependentSimComps.RunDependentMCTaskIterator import getRunLumiInfoFragment
    fragment = getRunLumiInfoFragment(
        jobnumber=(jobNumber - 1),
        task=generatedProfile,
        maxEvents=maxEvents,
        totalEvents=totalEvents,
        skipEvents=skipEvents,
        sequentialEventNumbers=sequentialEventNumbers)

    # drop lumiblocks that would contribute no events, then order by step
    for element in fragment:
        if element['evts'] == 0:
            logger.warning('Found lumiblock with no events! This lumiblock will not be used:\n (' + element.__str__() + ')')
    fragment = sorted([x for x in fragment if x['evts'] != 0],
                      key=lambda x: x['step'])

    flags.Input.RunAndLumiOverrideList = fragment
def scaleNumberOfCollisions(flags):
    """Scale the number of events per crossing to the largest value in job.

    Note: beam halo and beam gas will NOT be scaled!"""
    logger = logging.getLogger("PileUp")

    maxMu = max(element['mu'] for element in flags.Input.RunAndLumiOverrideList)
    if not (maxMu > 0 and flags.Digitization.PU.NumberOfCollisions):
        # nothing to scale against -- leave flags untouched
        return

    scale = maxMu / flags.Digitization.PU.NumberOfCollisions
    nCollisions = flags.Digitization.PU.NumberOfCollisions
    if nCollisions:
        flags.Digitization.PU.NumberOfCollisions = maxMu
        logger.info("Changing Digitization.PU.NumberOfCollisions from %s to %s",
                    nCollisions, flags.Digitization.PU.NumberOfCollisions)

    if flags.Digitization.PU.NumberOfLowPtMinBias:
        old = flags.Digitization.PU.NumberOfLowPtMinBias
        flags.Digitization.PU.NumberOfLowPtMinBias *= scale
        logger.info("Changing Digitization.PU.NumberOfLowPtMinBias from %s to %s",
                    old, flags.Digitization.PU.NumberOfLowPtMinBias)

    if flags.Digitization.PU.NumberOfHighPtMinBias:
        old = flags.Digitization.PU.NumberOfHighPtMinBias
        flags.Digitization.PU.NumberOfHighPtMinBias *= scale
        logger.info("Changing Digitization.PU.NumberOfHighPtMinBias from %s to %s",
                    old, flags.Digitization.PU.NumberOfHighPtMinBias)

    if flags.Digitization.PU.NumberOfCavern:
        old = flags.Digitization.PU.NumberOfCavern
        flags.Digitization.PU.NumberOfCavern *= scale
        logger.info("Changing Digitization.PU.NumberOfCavern from %s to %s",
                    old, flags.Digitization.PU.NumberOfCavern)
def setupPileUpProfile(flags):
    """Configure the pile-up profile and bunch structure on the given flags.

    Selects the profile module (a custom mu-range profile or the configured
    ProfileConfig), sets up the bunch structure module, derives
    NumberOfCollisions from the low- and high-pt minbias counts and finally
    generates the pile-up profile.

    Raises ValueError if the bunch structure is missing or malformed.
    """
    bunchStructure = flags.Digitization.PU.BunchStructureConfig

    # custom pile-up profile
    if flags.Digitization.PU.CustomProfile:
        if isinstance(flags.Digitization.PU.CustomProfile, str):
            # NOTE(review): eval of a config string -- acceptable only for
            # trusted job configuration, never for untrusted input
            flags.Digitization.PU.CustomProfile = eval(flags.Digitization.PU.CustomProfile)
        if isinstance(flags.Digitization.PU.CustomProfile, dict):
            pileUpProfile = 'RunDependentSimData.PileUpProfile_muRange'
    else:
        pileUpProfile = flags.Digitization.PU.ProfileConfig

    # sanity checks on the bunch structure configuration
    if not bunchStructure:
        raise ValueError('Bunch structure needs to be set')
    parts = bunchStructure.split('.')
    if len(parts) < 2:
        raise ValueError('Bunch structure configuration should be of the form Package.Module')

    from importlib import import_module
    loaded_module = import_module(bunchStructure)
    loaded_module.setupBunchStructure(flags)

    flags.Digitization.PU.NumberOfCollisions = flags.Digitization.PU.NumberOfLowPtMinBias + flags.Digitization.PU.NumberOfHighPtMinBias

    generatePileUpProfile(flags, pileUpProfile,
                          sequentialEventNumbers=flags.Digitization.PU.ForceSequentialEventNumbers)