ATLAS Offline Software
BatchedMinbiasSvc Class Reference

#include <BatchedMinbiasSvc.h>

Inheritance diagram for BatchedMinbiasSvc:

Public Member Functions

 BatchedMinbiasSvc (const std::string &name, ISvcLocator *svc)
 Constructor.
 ~BatchedMinbiasSvc ()
 Destructor.
StatusCode initialize () override
 AthService initialize.
StatusCode beginHardScatter (const EventContext &ctx) override
StoreGateSvc * getMinbias (const EventContext &ctx, std::uint64_t mb_id) override
std::size_t getNumForBunch (const EventContext &ctx, int bunch) const override
virtual std::int64_t get_hs_id (const EventContext &ctx) const override
StatusCode endHardScatter (const EventContext &ctx) override

Private Types

using SGHandle = ServiceHandle<StoreGateSvc>
using SGHandleArray = std::vector<SGHandle>

Private Member Functions

int event_to_batch (std::int64_t hs_id)
std::size_t calcMBRequired (std::int64_t hs_id, std::size_t slot, unsigned int run, unsigned int lumi, std::uint64_t event)

Private Attributes

Gaudi::Property< std::uint64_t > m_seed
Gaudi::Property< bool > m_onDemandMB
Gaudi::Property< bool > m_usePoisson
Gaudi::Property< bool > m_useBeamInt
Gaudi::Property< bool > m_useBeamLumi
Gaudi::Property< int > m_MBBatchSize
Gaudi::Property< int > m_NSimultaneousBatches
Gaudi::Property< int > m_HSBatchSize
Gaudi::Property< int > m_skippedHSEvents
Gaudi::Property< float > m_nPerBunch
Gaudi::Property< int > m_earliestDeltaBC
Gaudi::Property< int > m_latestDeltaBC
Gaudi::Property< std::vector< int > > m_actualNHSEventsPerBatch
ServiceHandle< ISkipEventIdxSvc > m_skipEventIdxSvc
ServiceHandle< IEvtSelector > m_bkgEventSelector
ServiceHandle< IBeamIntensity > m_beamInt
ServiceHandle< IBeamLuminosity > m_beamLumi
ServiceHandle< ActiveStoreSvc > m_activeStoreSvc
SGHandle m_spare_store
IEvtSelector::Context * m_bkg_evt_sel_ctx
std::vector< std::vector< std::uint64_t > > m_num_mb_by_bunch
std::vector< std::vector< std::uint64_t > > m_idx_lists
std::map< int, std::unique_ptr< SGHandleArray > > m_cache
std::map< int, std::mutex > m_cache_mtxs
std::mutex m_reading_batch_mtx
std::deque< std::unique_ptr< SGHandleArray > > m_empty_caches
std::mutex m_empty_caches_mtx
std::vector< std::unique_ptr< std::atomic_int > > m_batch_use_count
std::atomic_int m_last_loaded_batch

Detailed Description

Definition at line 28 of file BatchedMinbiasSvc.h.
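
A minimal, hypothetical usage sketch of the interface documented above (the client function, the IMinbiasSvc interface type and the bunch-range arguments are illustrative assumptions; only beginHardScatter, getNumForBunch, getMinbias and endHardScatter come from this class):

// Hypothetical client sketch (framework headers assumed; not part of the class).
StatusCode processOneHardScatter(IMinbiasSvc& mbSvc, const EventContext& ctx,
                                 int earliestDeltaBC, int latestDeltaBC) {
  // Block until the batch that contains this hard scatter is fully loaded.
  if (mbSvc.beginHardScatter(ctx).isFailure()) return StatusCode::FAILURE;

  std::uint64_t mb_id = 0;
  for (int bunch = earliestDeltaBC; bunch <= latestDeltaBC; ++bunch) {
    // How many background events this hard scatter uses in this bunch crossing.
    const std::size_t n = mbSvc.getNumForBunch(ctx, bunch);
    for (std::size_t i = 0; i < n; ++i, ++mb_id) {
      StoreGateSvc* sg = mbSvc.getMinbias(ctx, mb_id);  // cached background event
      // ... overlay the contents of *sg onto the hard-scatter event ...
      (void)sg;
    }
  }
  // Release this event's use of the batch so its cache can be recycled.
  return mbSvc.endHardScatter(ctx);
}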

Member Typedef Documentation

◆ SGHandle

using BatchedMinbiasSvc::SGHandle = ServiceHandle<StoreGateSvc>
private

Definition at line 49 of file BatchedMinbiasSvc.h.

◆ SGHandleArray

using BatchedMinbiasSvc::SGHandleArray = std::vector<SGHandle>
private

Definition at line 50 of file BatchedMinbiasSvc.h.

Constructor & Destructor Documentation

◆ BatchedMinbiasSvc()

BatchedMinbiasSvc::BatchedMinbiasSvc ( const std::string & name,
ISvcLocator * svc )

Constructor.

Definition at line 32 of file BatchedMinbiasSvc.cxx.

33 : base_class(name, svc),
34 m_bkg_evt_sel_ctx(nullptr),
35 m_last_loaded_batch() {}

◆ ~BatchedMinbiasSvc()

BatchedMinbiasSvc::~BatchedMinbiasSvc ( )

Destructor.

Definition at line 37 of file BatchedMinbiasSvc.cxx.

37{}

Member Function Documentation

◆ beginHardScatter()

StatusCode BatchedMinbiasSvc::beginHardScatter ( const EventContext & ctx)
override

Definition at line 286 of file BatchedMinbiasSvc.cxx.

286 {
287 using namespace std::chrono_literals;
288 bool first_wait = true;
289 std::chrono::steady_clock::time_point cache_wait_start{};
290 std::chrono::steady_clock::time_point order_wait_start{};
291 const std::int64_t hs_id = get_hs_id(ctx);
292 const int batch = event_to_batch(hs_id);
293 calcMBRequired(hs_id, ctx.slot(),
294 ctx.eventID().run_number(), // don't need the total, only
295 ctx.eventID().lumi_block(), // need to populate the arrays
296 ctx.eventID().event_number());
297 while (true) {
298 if (m_cache.count(batch) != 0) {
299 // batch already loaded
300 // mutex prevents returning when batch is partially loaded
301 m_cache_mtxs[batch].lock();
302 m_cache_mtxs[batch].unlock();
303 return StatusCode::SUCCESS;
304 }
305 // prevent batches loading out-of-order
306 if (m_last_loaded_batch < (batch - 1)) {
307 ATH_MSG_INFO("Waiting to prevent out-of-order loading of batches");
308 order_wait_start = std::chrono::steady_clock::now();
309 while (m_last_loaded_batch < (batch - 1)) {
310 std::this_thread::sleep_for(50ms);
311 }
312 auto wait_time = std::chrono::steady_clock::now() - order_wait_start;
313 ATH_MSG_INFO(std::format(
314 "Waited {:%M:%S} to prevent out-of-order loading", wait_time));
315 }
316 // See if there are any free caches
317 // Using try_lock here to avoid reading same batch twice
318 std::unique_lock<std::mutex> empty_caches_lock (m_empty_caches_mtx,
319 std::try_to_lock);
320 if (empty_caches_lock.owns_lock()) {
321 if (m_empty_caches.empty()) {
322 // Unlock mutex if we got the lock but there were no free caches
323 empty_caches_lock.unlock();
324 if (first_wait) {
325 ATH_MSG_INFO("Waiting for a free cache");
326 first_wait = false;
327 cache_wait_start = std::chrono::steady_clock::now();
328 }
329 // Wait 100ms then try again
330 std::this_thread::sleep_for(100ms);
331 continue;
332 }
333 if (!first_wait) {
334 auto wait_time = std::chrono::steady_clock::now() - cache_wait_start;
335 ATH_MSG_INFO(
336 std::format("Waited {:%M:%S} for a free cache", wait_time));
337 }
338 std::scoped_lock reading{m_cache_mtxs[batch], m_reading_batch_mtx};
339 if (m_HSBatchSize != 0) {
340 ATH_MSG_INFO("Reading next batch in event " << ctx.evt() << ", slot "
341 << ctx.slot() << " (hs_id "
342 << hs_id << ")");
343 }
344 auto start_time = std::chrono::system_clock::now();
345 m_cache[batch] = std::move(m_empty_caches.front());
346 m_empty_caches.pop_front();
347 // Remember old store to reset later
348 auto* old_store = m_activeStoreSvc->activeStore();
349 for (auto&& sg : *m_cache[batch]) {
350 // Change active store
351 m_activeStoreSvc->setStore(sg.get());
352 SG::CurrentEventStore::Push reader_sg_ces(sg.get());
353 // Read next event
354 ATH_CHECK(sg->clearStore(true));
355 if (!(m_bkgEventSelector->next(*m_bkg_evt_sel_ctx)).isSuccess()) {
356 ATH_MSG_FATAL("Ran out of minbias events");
357 return StatusCode::FAILURE;
358 }
359 IOpaqueAddress* addr = nullptr;
360 if (!m_bkgEventSelector->createAddress(*m_bkg_evt_sel_ctx, addr)
361 .isSuccess()) {
362 ATH_MSG_WARNING("Failed to create address. No more events?");
363 return StatusCode::FAILURE;
364 }
365 if (addr == nullptr) {
366 ATH_MSG_WARNING("createAddress returned nullptr. No more events?");
367 return StatusCode::FAILURE;
368 }
369 ATH_CHECK(sg->recordAddress(addr));
370 ATH_CHECK(sg->loadEventProxies());
371 // Read data now if desired
372 if (!m_onDemandMB) {
373 for (const auto* proxy_ptr : sg->proxies()) {
374 if (!proxy_ptr->isValid()) {
375 continue;
376 }
377
378 // Sort of a const_cast, then ->accessData()
379 sg->proxy_exact(proxy_ptr->sgkey())->accessData();
380 }
381 }
382 }
383 // Reset active store
384 m_activeStoreSvc->setStore(old_store);
385 if (m_HSBatchSize != 0) {
386 ATH_MSG_INFO(std::format(
387 "Reading {} events took {:%OMm %OSs}", m_cache[batch]->size(),
388 std::chrono::system_clock::now() - start_time));
389 }
390 m_last_loaded_batch.exchange(batch);
391 return StatusCode::SUCCESS;
392 }
393 }
394 return StatusCode::SUCCESS;
395}
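
A detail worth noting in the loop above: when the batch is already present in m_cache, the code simply locks and immediately unlocks m_cache_mtxs[batch] (code lines 301-302). Because the loading thread holds that same mutex for the whole read (the std::scoped_lock at code line 338), the lock/unlock pair acts as a barrier that makes a consumer wait for a partially loaded batch. The standalone sketch below reproduces the idiom with hypothetical names; it is not the service's code.

#include <map>
#include <mutex>
#include <vector>

// batch id -> loaded payload, and one mutex per batch (illustrative types).
std::map<int, std::vector<int>> cache;
std::map<int, std::mutex> cache_mtxs;

void load_batch(int batch) {
  std::lock_guard guard{cache_mtxs[batch]};  // held for the whole (slow) load
  cache[batch] = std::vector<int>(10000);    // stand-in for reading events
}

const std::vector<int>& wait_for_batch(int batch) {
  // The entry exists, so a loader has started; pass through its mutex to
  // block until the load has finished, then read safely.
  cache_mtxs[batch].lock();
  cache_mtxs[batch].unlock();
  return cache.at(batch);
}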

◆ calcMBRequired()

std::size_t BatchedMinbiasSvc::calcMBRequired ( std::int64_t hs_id,
std::size_t slot,
unsigned int run,
unsigned int lumi,
std::uint64_t event )
private

Definition at line 195 of file BatchedMinbiasSvc.cxx.

199 {
200 const int n_bunches = m_latestDeltaBC.value() - m_earliestDeltaBC.value() + 1;
201 FastReseededPRNG prng{m_seed.value(), hs_id};
202
203 // First apply the beam luminosity SF
204 bool sf_updated_throwaway;
205 const float beam_lumi_sf =
206 m_useBeamLumi ? m_beamLumi->scaleFactor(run, lumi, sf_updated_throwaway)
207 : 1.f;
208 std::vector<float> avg_num_mb_by_bunch(n_bunches,
209 beam_lumi_sf * m_nPerBunch.value());
210 // Now update using beam intensities
211 if (m_useBeamInt) {
212 // Supposed to be once per event, but ends up running once per minbias type
213 // per event now
214 m_beamInt->selectT0(run, event);
215 for (int bunch = m_earliestDeltaBC.value();
216 bunch <= m_latestDeltaBC.value(); ++bunch) {
217 std::size_t idx = bunch - m_earliestDeltaBC.value();
218 avg_num_mb_by_bunch[idx] *= m_beamInt->normFactor(bunch);
219 }
220 }
221
222 std::vector<std::uint64_t>& num_mb_by_bunch = m_num_mb_by_bunch[slot];
223 num_mb_by_bunch.clear();
224 num_mb_by_bunch.resize(n_bunches);
225
226 if (m_usePoisson) {
227 std::transform(avg_num_mb_by_bunch.begin(), avg_num_mb_by_bunch.end(),
228 num_mb_by_bunch.begin(), [&prng](float avg) {
229 return std::poisson_distribution<std::uint64_t>(avg)(prng);
230 });
231 } else {
232 std::transform(avg_num_mb_by_bunch.begin(), avg_num_mb_by_bunch.end(),
233 num_mb_by_bunch.begin(), [](float f) {
234 return static_cast<std::uint64_t>(std::round(f));
235 });
236 }
237
238 std::uint64_t num_mb = ranges::accumulate(num_mb_by_bunch, 0UL);
239 std::vector<std::uint64_t>& index_array = m_idx_lists[slot];
240 const std::uint64_t mbBatchSize = m_MBBatchSize.value();
241 // Prevent running out of events
242 if (num_mb > mbBatchSize) {
243 const int center_bunch = -m_earliestDeltaBC.value();
244 auto indices =
245 rv::iota(0ULL, num_mb_by_bunch.size()) |
246 rv::filter([center_bunch, &num_mb_by_bunch](int idx) {
247 bool good = idx != center_bunch; // filter out the central bunch
248 good =
249 good && num_mb_by_bunch[idx] > 0; // filter out unfilled bunches
250 return good;
251 }) |
252 ranges::to<std::vector>;
253 // sort by distance from central bunch
254 ranges::stable_sort(indices, std::greater{},
255 [center_bunch](std::size_t idx) {
256 return std::size_t(std::abs(int(idx) - center_bunch));
257 });
258 // subtract from bunches until we aren't using too many events
259 for (auto idx : indices) {
260 const std::uint64_t max_to_subtract = num_mb - mbBatchSize;
261 const std::uint64_t num_subtracted =
262 std::min(max_to_subtract, num_mb_by_bunch[idx]);
263 num_mb_by_bunch[idx] -= num_subtracted;
264 num_mb -= num_subtracted;
265 if (num_mb <= mbBatchSize) {
266 break;
267 }
268 }
269 // Print an error anyway so we can fix the job
270 ATH_MSG_ERROR("We need " << num_mb << " events but the batch size is "
271 << mbBatchSize << ". Restricting to "
272 << mbBatchSize << " events!");
273 }
274 index_array = rv::ints(0, int(mbBatchSize)) | rv::sample(num_mb, prng) |
275 ranges::to<std::vector<std::uint64_t>>;
276 ranges::shuffle(index_array, prng);
277 ATH_MSG_DEBUG("HS ID " << hs_id << " uses " << num_mb << " events");
278 // Disabled until C++ 23 range formatting can be used
279 // if (m_HSBatchSize <= 1) {
280 // ATH_MSG_DEBUG(fmt::format("\t\tBy bunch: [{}]\n", fmt::join(num_mb_by_bunch, ", "))
281 // << fmt::format("\t\tOrder: [{}]", fmt::join(index_array, ", ")));
282 // }
283 return num_mb;
284}
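
Two conventions in calcMBRequired() are worth spelling out: bunch-crossing delta b (from EarliestDeltaBC to LatestDeltaBC) is stored at array index b - EarliestDeltaBC, so the in-time bunch sits at index -EarliestDeltaBC; and the per-bunch count is either a Poisson draw around the (scaled) average or a plain rounding of it, depending on UsePoisson. The sketch below reproduces just that part, with std::mt19937_64 standing in for FastReseededPRNG and with the beam-luminosity and beam-intensity scaling omitted; it is an illustration, not the service's implementation.

#include <cmath>
#include <cstdint>
#include <random>
#include <vector>

std::vector<std::uint64_t> countsPerBunch(int earliestDeltaBC,  // e.g. -32
                                          int latestDeltaBC,    // e.g. +6
                                          float avgPerBunch,    // AvgMBPerBunch
                                          bool usePoisson, std::uint64_t seed) {
  const int n_bunches = latestDeltaBC - earliestDeltaBC + 1;
  std::mt19937_64 prng{seed};  // real service: FastReseededPRNG{Seed, hs_id}
  std::vector<std::uint64_t> counts(n_bunches);
  for (int bunch = earliestDeltaBC; bunch <= latestDeltaBC; ++bunch) {
    const std::size_t idx = bunch - earliestDeltaBC;  // index convention
    counts[idx] =
        usePoisson
            ? std::poisson_distribution<std::uint64_t>(avgPerBunch)(prng)
            : static_cast<std::uint64_t>(std::round(avgPerBunch));
  }
  return counts;
}

If the total of these counts exceeds MBBatchSize, the real method then trims events from the bunches farthest from the in-time one (code lines 242-273) and emits ATH_MSG_ERROR so the job configuration can be fixed.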

◆ endHardScatter()

StatusCode BatchedMinbiasSvc::endHardScatter ( const EventContext & ctx)
override

Definition at line 416 of file BatchedMinbiasSvc.cxx.

416 {
417 using namespace std::chrono_literals;
418 const std::int64_t hs_id = get_hs_id(ctx);
419 const int batch = event_to_batch(hs_id);
420 const int uses = m_batch_use_count[batch]->fetch_add(1) + 1;
421
422 // If we're done with every event in the batch, clear the stores and return
423 // them
424 if (uses == m_HSBatchSize.value()) {
425 std::unique_ptr temp = std::move(m_cache[batch]);
426 m_cache.erase(batch);
427 for (auto&& sg : *temp) {
428 ATH_CHECK(sg->clearStore());
429 }
430 std::lock_guard lg{m_empty_caches_mtx};
431 m_empty_caches.emplace_back(std::move(temp));
432 } else {
433 ATH_MSG_DEBUG("BATCH " << batch << ": " << uses << " uses out of "
434 << m_HSBatchSize << " "
435 << m_actualNHSEventsPerBatch);
436 }
437 return StatusCode::SUCCESS;
438}
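
HSBatchSize therefore acts as a reuse budget: every hard scatter in a batch bumps the atomic use counter, and only the call that brings the count up to HSBatchSize clears the StoreGates and returns the cache to m_empty_caches. A small standalone sketch of that recycle-on-last-use pattern, with stand-in types, is given below.

#include <atomic>
#include <deque>
#include <memory>
#include <mutex>
#include <vector>

using Cache = std::vector<int>;  // stands in for SGHandleArray

// Returns true when this caller was the last user and the cache was recycled.
bool release_use(std::atomic_int& use_count, int hsBatchSize,
                 std::unique_ptr<Cache>& slot,                 // like m_cache[batch]
                 std::deque<std::unique_ptr<Cache>>& empties,  // like m_empty_caches
                 std::mutex& empties_mtx) {
  const int uses = use_count.fetch_add(1) + 1;
  if (uses != hsBatchSize) {
    return false;  // other hard scatters in the batch still need it
  }
  std::unique_ptr<Cache> temp = std::move(slot);  // take the cache out of the map
  temp->clear();                                  // stand-in for clearStore()
  std::lock_guard lg{empties_mtx};
  empties.emplace_back(std::move(temp));          // ready for the next batch
  return true;
}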

◆ event_to_batch()

int BatchedMinbiasSvc::event_to_batch ( std::int64_t hs_id)
private

Definition at line 39 of file BatchedMinbiasSvc.cxx.

39 {
40 return int(hs_id / m_HSBatchSize.value());
41}

◆ get_hs_id()

virtual std::int64_t BatchedMinbiasSvc::get_hs_id ( const EventContext & ctx) const
inline override virtual

Definition at line 43 of file BatchedMinbiasSvc.h.

43 {
44 return m_skippedHSEvents.value() + ctx.evt();
45 }
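
Combined with event_to_batch() above, the batch index is determined purely by the job-wide ordinal of the hard scatter, skipped events included: with HSBatchSize = 4, hard scatters 0-3 share batch 0, 4-7 share batch 1, and so on. A short restatement of that arithmetic:

#include <cstdint>

// Restatement of get_hs_id() + event_to_batch() with the properties passed in.
int batch_for(std::int64_t ctxEvt, int skippedHSEvents, int hsBatchSize) {
  const std::int64_t hs_id = skippedHSEvents + ctxEvt;  // get_hs_id()
  return static_cast<int>(hs_id / hsBatchSize);         // event_to_batch()
}
// e.g. hsBatchSize == 4: hs_id 0..3 -> batch 0, hs_id 4..7 -> batch 1, ...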

◆ getMinbias()

StoreGateSvc * BatchedMinbiasSvc::getMinbias ( const EventContext & ctx,
std::uint64_t mb_id )
override

Definition at line 397 of file BatchedMinbiasSvc.cxx.

398 {
399 const std::int64_t hs_id = get_hs_id(ctx);
400 const std::size_t slot = ctx.slot();
401 const std::size_t index = m_idx_lists.at(slot).at(mb_id);
402 const int batch = event_to_batch(hs_id);
403 return m_cache[batch]->at(index).get();
404}
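
Note that mb_id does not index the batch cache directly: it is first mapped through the shuffled per-slot index list that calcMBRequired() filled for this hard scatter, so each hard scatter draws a randomized subset and ordering of the MBBatchSize cached events. A minimal sketch of that two-step lookup, with illustrative types:

#include <cstdint>
#include <vector>

struct FakeStore { int payload = 0; };  // stands in for a cached StoreGateSvc

FakeStore* lookupMinbias(std::uint64_t mb_id,
                         const std::vector<std::uint64_t>& idx_list,  // per slot
                         std::vector<FakeStore>& batch_cache) {       // per batch
  const std::size_t index = idx_list.at(mb_id);  // shuffled by calcMBRequired()
  return &batch_cache.at(index);                 // one of MBBatchSize entries
}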

◆ getNumForBunch()

std::size_t BatchedMinbiasSvc::getNumForBunch ( const EventContext & ctx,
int bunch ) const
override

Definition at line 406 of file BatchedMinbiasSvc.cxx.

407 {
408 if (bunch < m_earliestDeltaBC.value() || bunch > m_latestDeltaBC.value()) {
409 throw std::logic_error(std::format(
410 "Tried to request bunch {} which is outside the range [{}, {}]", bunch,
411 m_earliestDeltaBC.value(), m_latestDeltaBC.value()));
412 }
413 return m_num_mb_by_bunch.at(ctx.slot()).at(bunch - m_earliestDeltaBC.value());
414}

◆ initialize()

StatusCode BatchedMinbiasSvc::initialize ( )
override

AthService initialize.

Definition at line 43 of file BatchedMinbiasSvc.cxx.

43 {
44 ATH_CHECK(m_skipEventIdxSvc.retrieve());
45 ATH_CHECK(m_beamInt.retrieve());
46 ATH_CHECK(m_beamLumi.retrieve());
47 std::size_t n_concurrent =
48 Gaudi::Concurrency::ConcurrencyFlags::numConcurrentEvents();
49 m_idx_lists.clear();
50 m_idx_lists.resize(n_concurrent);
51
52 m_num_mb_by_bunch.clear();
53 m_num_mb_by_bunch.resize(n_concurrent);
54
55 m_cache.clear();
56 m_empty_caches.clear();
57 m_batch_use_count.clear();
58 m_batch_use_count.reserve(m_actualNHSEventsPerBatch.value().size());
59 for (std::size_t i = 0; i < m_actualNHSEventsPerBatch.value().size(); ++i) {
60 m_batch_use_count.emplace_back(std::make_unique<std::atomic_int>(0));
61 }
62 ATH_CHECK(m_bkgEventSelector.retrieve());
63 ATH_CHECK(m_activeStoreSvc.retrieve());
64 // Setup context
65 if (!m_bkgEventSelector->createContext(m_bkg_evt_sel_ctx).isSuccess()) {
66 ATH_MSG_ERROR("Failed to create background event selector context");
67 return StatusCode::FAILURE;
68 }
69 ATH_CHECK(SmartIF<IService>(m_bkgEventSelector.get())->start());
70
71 // Setup proxy provider
72 SmartIF<IProxyProviderSvc> proxyProviderSvc{
73 serviceLocator()->service(std::format("ProxyProviderSvc/BkgPPSvc_{}", name()))
74 };
75 ATH_CHECK(proxyProviderSvc.isValid());
76
77 // Setup Address Providers
78 SmartIF<IAddressProvider> addressProvider{m_bkgEventSelector.get()};
79 if (!addressProvider) {
81 "Could not cast background event selector to IAddressProvider");
82 } else {
83 proxyProviderSvc->addProvider(addressProvider);
84 }
85 // AthenaPoolAddressProviderSvc
86 SmartIF<IAddressProvider> athPoolAP{
87 serviceLocator()->service(std::format("AthenaPoolAddressProviderSvc/BkgAPAPSvc_{}", name()))
88 };
89 if (!athPoolAP) {
91 "Could not cast AthenaPoolAddressProviderSvc to IAddressProvider");
92 } else {
93 proxyProviderSvc->addProvider(athPoolAP);
94 }
95 // AddressRemappingSvc
96 SmartIF<IAddressProvider> addRemapAP{
97 serviceLocator()->service(std::format("AddressRemappingSvc/BkgARSvc_{}", name()))
98 };
99 if (!addRemapAP) {
100 ATH_MSG_WARNING("Could not cast AddressRemappingSvc to IAddressProvider");
101 } else {
102 proxyProviderSvc->addProvider(addRemapAP);
103 }
104
105 int mbBatchSize = m_MBBatchSize.value();
106 // setup NSimultaneousBatches vectors of MBBatchSize StoreGates in
107 // m_empty_caches
108 for (int i = 0; i < m_NSimultaneousBatches.value(); ++i) {
109 auto& sgs = m_empty_caches.emplace_back(std::make_unique<SGHandleArray>());
110 sgs->reserve(mbBatchSize);
111 for (int j = 0; j < mbBatchSize; ++j) {
112 // creates / retrieves a different StoreGateSvc for each slot
113 auto& sg = sgs->emplace_back(
114 std::format("StoreGateSvc/StoreGate_{}_{}_{}", name(), i, j), name());
115 ATH_CHECK(sg.retrieve());
116 sg->setStoreID(StoreID::PILEUP_STORE);
117 sg->setProxyProviderSvc(proxyProviderSvc);
118 }
119 }
120
121 // Setup the spare store for event skipping
122 ATH_CHECK(m_spare_store.retrieve());
123 m_spare_store->setStoreID(StoreID::PILEUP_STORE);
124 m_spare_store->setProxyProviderSvc(proxyProviderSvc);
125
126 // Setup the callback for event skipping
127 auto skipEvent_callback = [this, mbBatchSize](
128 ISkipEventIdxSvc::EvtIter begin,
129 ISkipEventIdxSvc::EvtIter end) -> StatusCode {
130 using namespace std::chrono_literals;
131 auto evts = ranges::make_subrange(begin, end);
132 ATH_MSG_INFO("Skipping " << end - begin << " HS events.");
133 auto batches_all =
134 evts | rv::transform([this](const ISkipEventIdxSvc::EvtId& evt) {
135 return event_to_batch(evt.evtIdx);
136 });
137 std::vector<std::tuple<int, int>> batches_with_counts{};
138 // Produce a list of batches, and how many times they appear
139 for (int batch : batches_all) {
140 // First entry
141 if (batches_with_counts.empty()) {
142 batches_with_counts.emplace_back(batch, 1);
143 continue;
144 }
145 // Subsequent entries
146 auto& last_entry = batches_with_counts.back();
147 if (batch == std::get<0>(last_entry)) {
148 std::get<1>(last_entry) += 1;
149 continue;
150 }
151 batches_with_counts.emplace_back(batch, 1);
152 }
153
154 // Discard batches
155 const int hs_batch_size = m_HSBatchSize.value();
156 auto* const old_store = m_activeStoreSvc->activeStore();
157 m_activeStoreSvc->setStore(m_spare_store.get());
158 ATH_CHECK(m_spare_store->clearStore());
159 for (const auto& [batch, count] : batches_with_counts) {
160 if (m_cache.count(batch) != 0) {
161 // batch is currently loaded, just update the use count
162 m_batch_use_count[batch]->fetch_add(count);
163 continue;
164 }
165 // force ordering in background stream
166 while (m_last_loaded_batch < batch - 1) {
167 std::this_thread::sleep_for(50ms);
168 }
169 // if we aren't skipping all the hardscatters in the batch, do nothing
170 if ((m_batch_use_count[batch]->fetch_add(count) + count) <
171 hs_batch_size) {
172 continue;
173 }
174 // otherwise discard the batch
175 ATH_MSG_INFO("Discarding batch " << batch);
176 std::unique_lock lck{m_reading_batch_mtx};
177 if (!m_bkgEventSelector->next(*m_bkg_evt_sel_ctx, mbBatchSize)
178 .isSuccess()) {
179 ATH_MSG_INFO("Ran out of background events");
180 return StatusCode::FAILURE;
181 }
182 // increment counters
183 m_last_loaded_batch.fetch_add(1);
184 }
185 ATH_CHECK(m_spare_store->clearStore());
186 m_activeStoreSvc->setStore(old_store);
187 return StatusCode::SUCCESS;
188 };
189
190 // register callback
191 ATH_CHECK(m_skipEventIdxSvc->registerCallback(skipEvent_callback));
192 return StatusCode::SUCCESS;
193}
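
One step of the skip-event callback deserves a note: the skipped events arrive as an iterator range, are mapped to batch ids, and consecutive identical ids are then compressed into (batch, count) pairs (code lines 137-152) so whole batches can be discarded in one go. That compression is a plain run-length encoding, sketched below using only the standard library.

#include <tuple>
#include <vector>

// Run-length encode a sequence of batch ids, mirroring batches_with_counts.
std::vector<std::tuple<int, int>> runLengthEncode(const std::vector<int>& batches) {
  std::vector<std::tuple<int, int>> out;
  for (int batch : batches) {
    if (!out.empty() && std::get<0>(out.back()) == batch) {
      std::get<1>(out.back()) += 1;  // extend the current run
    } else {
      out.emplace_back(batch, 1);    // start a new run
    }
  }
  return out;
}
// {0, 0, 0, 1, 1, 2} -> {(0, 3), (1, 2), (2, 1)}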

Member Data Documentation

◆ m_activeStoreSvc

ServiceHandle<ActiveStoreSvc> BatchedMinbiasSvc::m_activeStoreSvc
private
Initial value:
{
this, "ActiveStoreSvc", "ActiveStoreSvc", "ActiveStoreSvc"}

Definition at line 97 of file BatchedMinbiasSvc.h.

97 {
98 this, "ActiveStoreSvc", "ActiveStoreSvc", "ActiveStoreSvc"};

◆ m_actualNHSEventsPerBatch

Gaudi::Property<std::vector<int> > BatchedMinbiasSvc::m_actualNHSEventsPerBatch
private
Initial value:
{
this,
"actualNHSEventsPerBatch",
{},
"Dynamic map of actual number of HS events for each batch, in this run."}

Definition at line 83 of file BatchedMinbiasSvc.h.

83 {
84 this,
85 "actualNHSEventsPerBatch",
86 {},
87 "Dynamic map of actual number of HS events for each batch, in this run."};

◆ m_batch_use_count

std::vector<std::unique_ptr<std::atomic_int> > BatchedMinbiasSvc::m_batch_use_count
private

Definition at line 113 of file BatchedMinbiasSvc.h.

◆ m_beamInt

ServiceHandle<IBeamIntensity> BatchedMinbiasSvc::m_beamInt
private
Initial value:
{this, "BeamIntSvc", "FlatBM",
"Beam intensity service"}

Definition at line 93 of file BatchedMinbiasSvc.h.

93 {this, "BeamIntSvc", "FlatBM",
94 "Beam intensity service"};

◆ m_beamLumi

ServiceHandle<IBeamLuminosity> BatchedMinbiasSvc::m_beamLumi
private
Initial value:
{
this, "BeamLumiSvc", "LumiProfileSvc", "Beam luminosity service"}

Definition at line 95 of file BatchedMinbiasSvc.h.

95 {
96 this, "BeamLumiSvc", "LumiProfileSvc", "Beam luminosity service"};

◆ m_bkg_evt_sel_ctx

IEvtSelector::Context* BatchedMinbiasSvc::m_bkg_evt_sel_ctx
private

Definition at line 103 of file BatchedMinbiasSvc.h.

◆ m_bkgEventSelector

ServiceHandle<IEvtSelector> BatchedMinbiasSvc::m_bkgEventSelector
private
Initial value:
{
this, "BkgEventSelector", {}, "Event selector for minbias events"}

Definition at line 91 of file BatchedMinbiasSvc.h.

91 {
92 this, "BkgEventSelector", {}, "Event selector for minbias events"};

◆ m_cache

std::map<int, std::unique_ptr<SGHandleArray> > BatchedMinbiasSvc::m_cache
private

Definition at line 107 of file BatchedMinbiasSvc.h.

◆ m_cache_mtxs

std::map<int, std::mutex> BatchedMinbiasSvc::m_cache_mtxs
private

Definition at line 108 of file BatchedMinbiasSvc.h.

◆ m_earliestDeltaBC

Gaudi::Property<int> BatchedMinbiasSvc::m_earliestDeltaBC
private
Initial value:
{
this, "EarliestDeltaBC", -32,
"Earliest bunch crossing to consider (as delta)"}

Definition at line 77 of file BatchedMinbiasSvc.h.

77 {
78 this, "EarliestDeltaBC", -32,
79 "Earliest bunch crossing to consider (as delta)"};

◆ m_empty_caches

std::deque<std::unique_ptr<SGHandleArray> > BatchedMinbiasSvc::m_empty_caches
private

Definition at line 111 of file BatchedMinbiasSvc.h.

◆ m_empty_caches_mtx

std::mutex BatchedMinbiasSvc::m_empty_caches_mtx
private

Definition at line 112 of file BatchedMinbiasSvc.h.

◆ m_HSBatchSize

Gaudi::Property<int> BatchedMinbiasSvc::m_HSBatchSize
private
Initial value:
{
this, "HSBatchSize", 1,
"Number of HS events per batch (aka max reuse factor)"}

Definition at line 69 of file BatchedMinbiasSvc.h.

69 {
70 this, "HSBatchSize", 1,
71 "Number of HS events per batch (aka max reuse factor)"};

◆ m_idx_lists

std::vector<std::vector<std::uint64_t> > BatchedMinbiasSvc::m_idx_lists
private

Definition at line 106 of file BatchedMinbiasSvc.h.

◆ m_last_loaded_batch

std::atomic_int BatchedMinbiasSvc::m_last_loaded_batch
private

Definition at line 114 of file BatchedMinbiasSvc.h.

◆ m_latestDeltaBC

Gaudi::Property<int> BatchedMinbiasSvc::m_latestDeltaBC
private
Initial value:
{
this, "LatestDeltaBC", +6,
"Latest bunch crossing to consider (as delta)"}

Definition at line 80 of file BatchedMinbiasSvc.h.

80 {
81 this, "LatestDeltaBC", +6,
82 "Latest bunch crossing to consider (as delta)"};

◆ m_MBBatchSize

Gaudi::Property<int> BatchedMinbiasSvc::m_MBBatchSize
private
Initial value:
{
this, "MBBatchSize", 10000,
"Number of low pT minbias events to load per batch"}

Definition at line 63 of file BatchedMinbiasSvc.h.

63 {
64 this, "MBBatchSize", 10000,
65 "Number of low pT minbias events to load per batch"};

◆ m_nPerBunch

Gaudi::Property<float> BatchedMinbiasSvc::m_nPerBunch
private
Initial value:
{
this, "AvgMBPerBunch", 0,
"Average (max) number of minbias events per bunch"}

Definition at line 74 of file BatchedMinbiasSvc.h.

74 {
75 this, "AvgMBPerBunch", 0,
76 "Average (max) number of minbias events per bunch"};

◆ m_NSimultaneousBatches

Gaudi::Property<int> BatchedMinbiasSvc::m_NSimultaneousBatches
private
Initial value:
{
this, "NSimultaneousBatches", 1,
"Max number of batches to load simultaneously"}

Definition at line 66 of file BatchedMinbiasSvc.h.

66 {
67 this, "NSimultaneousBatches", 1,
68 "Max number of batches to load simultaneously"};

◆ m_num_mb_by_bunch

std::vector<std::vector<std::uint64_t> > BatchedMinbiasSvc::m_num_mb_by_bunch
private

Definition at line 105 of file BatchedMinbiasSvc.h.

◆ m_onDemandMB

Gaudi::Property<bool> BatchedMinbiasSvc::m_onDemandMB
private
Initial value:
{
this, "OnDemandMB", false,
"Should minbias event contents be read on demand"}

Definition at line 53 of file BatchedMinbiasSvc.h.

53 {
54 this, "OnDemandMB", false,
55 "Should minbias event contents be read on demand"};

◆ m_reading_batch_mtx

std::mutex BatchedMinbiasSvc::m_reading_batch_mtx
private

Definition at line 110 of file BatchedMinbiasSvc.h.

◆ m_seed

Gaudi::Property<std::uint64_t> BatchedMinbiasSvc::m_seed
private
Initial value:
{this, "Seed", 0,
"Additional seed for PRNGs"}

Definition at line 51 of file BatchedMinbiasSvc.h.

51 {this, "Seed", 0,
52 "Additional seed for PRNGs"};

◆ m_skipEventIdxSvc

ServiceHandle<ISkipEventIdxSvc> BatchedMinbiasSvc::m_skipEventIdxSvc
private
Initial value:
{
this, "SkipEvtIdxSvc", "SkipEventIdxSvc",
"Skipped event index (run / lb num) provider"}

Definition at line 88 of file BatchedMinbiasSvc.h.

88 {
89 this, "SkipEvtIdxSvc", "SkipEventIdxSvc",
90 "Skipped event index (run / lb num) provider"};

◆ m_skippedHSEvents

Gaudi::Property<int> BatchedMinbiasSvc::m_skippedHSEvents
private
Initial value:
{this, "SkippedHSEvents", 0,
"Number of skipped HS events"}

Definition at line 72 of file BatchedMinbiasSvc.h.

72 {this, "SkippedHSEvents", 0,
73 "Number of skipped HS events"};

◆ m_spare_store

SGHandle BatchedMinbiasSvc::m_spare_store
private
Initial value:
{this, "StoreGateSvc",
std::format("StoreGateSvc/discards_{}", name()),
"StoreGate for discarding events"}

Definition at line 100 of file BatchedMinbiasSvc.h.

100 {this, "StoreGateSvc",
101 std::format("StoreGateSvc/discards_{}", name()),
102 "StoreGate for discarding events"};

◆ m_useBeamInt

Gaudi::Property<bool> BatchedMinbiasSvc::m_useBeamInt
private
Initial value:
{
this, "UseBeamInt", true, "Whether to use the beam intensity service"}

Definition at line 59 of file BatchedMinbiasSvc.h.

59 {
60 this, "UseBeamInt", true, "Whether to use the beam intensity service"};

◆ m_useBeamLumi

Gaudi::Property<bool> BatchedMinbiasSvc::m_useBeamLumi
private
Initial value:
{
this, "UseBeamLumi", true, "Whether to use the beam luminosity service"}

Definition at line 61 of file BatchedMinbiasSvc.h.

61 {
62 this, "UseBeamLumi", true, "Whether to use the beam luminosity service"};

◆ m_usePoisson

Gaudi::Property<bool> BatchedMinbiasSvc::m_usePoisson
private
Initial value:
{this, "UsePoisson", true,
"Whether to use a Poisson distribution "
"(if False, use a delta distribution)"}

Definition at line 56 of file BatchedMinbiasSvc.h.

56 {this, "UsePoisson", true,
57 "Whether to use a Poisson distribution "
58 "(if False, use a delta distribution)"};

The documentation for this class was generated from the following files:

BatchedMinbiasSvc.h
BatchedMinbiasSvc.cxx