ATLAS Offline Software
Loading...
Searching...
No Matches
BatchedMinbiasSvc Class Reference

#include <BatchedMinbiasSvc.h>

Inheritance diagram for BatchedMinbiasSvc:

Public Member Functions

 BatchedMinbiasSvc (const std::string &name, ISvcLocator *svc)
 Constructor.
 ~BatchedMinbiasSvc ()
 Destructor.
StatusCode initialize () override
 AthService initialize.
StatusCode beginHardScatter (const EventContext &ctx) override
StoreGateSvc * getMinbias (const EventContext &ctx, std::uint64_t mb_id) override
std::size_t getNumForBunch (const EventContext &ctx, int bunch) const override
virtual std::int64_t get_hs_id (const EventContext &ctx) const override
StatusCode endHardScatter (const EventContext &ctx) override

Private Types

using SGHandle = ServiceHandle<StoreGateSvc>
using SGHandleArray = std::vector<SGHandle>

Private Member Functions

int event_to_batch (std::int64_t hs_id)
std::size_t calcMBRequired (std::int64_t hs_id, std::size_t slot, unsigned int run, unsigned int lumi, std::uint64_t event)

Private Attributes

Gaudi::Property< std::uint64_t > m_seed
Gaudi::Property< bool > m_onDemandMB
Gaudi::Property< bool > m_usePoisson
Gaudi::Property< bool > m_useBeamInt
Gaudi::Property< bool > m_useBeamLumi
Gaudi::Property< int > m_MBBatchSize
Gaudi::Property< int > m_NSimultaneousBatches
Gaudi::Property< int > m_HSBatchSize
Gaudi::Property< int > m_skippedHSEvents
Gaudi::Property< float > m_nPerBunch
Gaudi::Property< int > m_earliestDeltaBC
Gaudi::Property< int > m_latestDeltaBC
Gaudi::Property< std::vector< int > > m_actualNHSEventsPerBatch
ServiceHandle< ISkipEventIdxSvc > m_skipEventIdxSvc
ServiceHandle< IEvtSelector > m_bkgEventSelector
ServiceHandle< IBeamIntensity > m_beamInt
ServiceHandle< IBeamLuminosity > m_beamLumi
ServiceHandle< ActiveStoreSvc > m_activeStoreSvc
SGHandle m_spare_store
IEvtSelector::Context * m_bkg_evt_sel_ctx
std::vector< std::vector< std::uint64_t > > m_num_mb_by_bunch
std::vector< std::vector< std::uint64_t > > m_idx_lists
std::map< int, std::unique_ptr< SGHandleArray > > m_cache
std::map< int, std::mutex > m_cache_mtxs
std::mutex m_reading_batch_mtx
std::deque< std::unique_ptr< SGHandleArray > > m_empty_caches
std::mutex m_empty_caches_mtx
std::vector< std::unique_ptr< std::atomic_int > > m_batch_use_count
std::atomic_int m_last_loaded_batch

Detailed Description

Definition at line 28 of file BatchedMinbiasSvc.h.

Member Typedef Documentation

◆ SGHandle

using BatchedMinbiasSvc::SGHandle = ServiceHandle<StoreGateSvc>
private

Definition at line 49 of file BatchedMinbiasSvc.h.

◆ SGHandleArray

using BatchedMinbiasSvc::SGHandleArray = std::vector<SGHandle>
private

Definition at line 50 of file BatchedMinbiasSvc.h.

Constructor & Destructor Documentation

◆ BatchedMinbiasSvc()

BatchedMinbiasSvc::BatchedMinbiasSvc ( const std::string & name,
ISvcLocator * svc )

Constructor.

Definition at line 33 of file BatchedMinbiasSvc.cxx.

34 : base_class(name, svc),
35 m_bkg_evt_sel_ctx(nullptr),
36 m_last_loaded_batch(-1) {}
IEvtSelector::Context * m_bkg_evt_sel_ctx
std::atomic_int m_last_loaded_batch

◆ ~BatchedMinbiasSvc()

BatchedMinbiasSvc::~BatchedMinbiasSvc ( )

Destructor.

Definition at line 38 of file BatchedMinbiasSvc.cxx.

38{}

Member Function Documentation

◆ beginHardScatter()

StatusCode BatchedMinbiasSvc::beginHardScatter ( const EventContext & ctx)
override

Definition at line 287 of file BatchedMinbiasSvc.cxx.

287 {
288 using namespace std::chrono_literals;
289 bool first_wait = true;
290 std::chrono::steady_clock::time_point cache_wait_start{};
291 std::chrono::steady_clock::time_point order_wait_start{};
292 const std::int64_t hs_id = get_hs_id(ctx);
293 const int batch = event_to_batch(hs_id);
294 calcMBRequired(hs_id, ctx.slot(),
295 ctx.eventID().run_number(), // don't need the total, only
296 ctx.eventID().lumi_block(), // need to populate the arrays
297 ctx.eventID().event_number());
298 while (true) {
299 if (m_cache.count(batch) != 0) {
300 // batch already loaded
301 // mutex prevents returning when batch is partially loaded
302 m_cache_mtxs[batch].lock();
303 m_cache_mtxs[batch].unlock();
304 return StatusCode::SUCCESS;
305 }
306 // prevent batches loading out-of-order
307 if (m_last_loaded_batch < (batch - 1)) {
308 ATH_MSG_INFO("Waiting to prevent out-of-order loading of batches");
309 order_wait_start = std::chrono::steady_clock::now();
310 while (m_last_loaded_batch < (batch - 1)) {
311 std::this_thread::sleep_for(50ms);
312 }
313 auto wait_time = std::chrono::steady_clock::now() - order_wait_start;
314 ATH_MSG_INFO(std::format(
315 "Waited {:%M:%S} to prevent out-of-order loading", wait_time));
316 }
317 // See if there are any free caches
318 // Using try_lock here to avoid reading same batch twice
319 std::unique_lock<std::mutex> empty_caches_lock (m_empty_caches_mtx,
320 std::try_to_lock);
321 if (empty_caches_lock.owns_lock()) {
322 if (m_empty_caches.empty()) {
323 // Unlock mutex if we got the lock but there were no free caches
324 empty_caches_lock.unlock();
325 if (first_wait) {
326 ATH_MSG_INFO("Waiting for a free cache");
327 first_wait = false;
328 cache_wait_start = std::chrono::steady_clock::now();
329 }
330 // Wait 100ms then try again
331 std::this_thread::sleep_for(100ms);
332 continue;
333 }
334 if (!first_wait) {
335 auto wait_time = std::chrono::steady_clock::now() - cache_wait_start;
336 ATH_MSG_INFO(
337 std::format("Waited {:%M:%S} for a free cache", wait_time));
338 }
339 std::scoped_lock reading{m_cache_mtxs[batch], m_reading_batch_mtx};
340 if (m_HSBatchSize != 0) {
341 ATH_MSG_INFO("Reading next batch in event " << ctx.evt() << ", slot "
342 << ctx.slot() << " (hs_id "
343 << hs_id << ")");
344 }
345 auto start_time = std::chrono::system_clock::now();
346 m_cache[batch] = std::move(m_empty_caches.front());
347 m_empty_caches.pop_front();
348 // Remember old store to reset later
349 auto* old_store = m_activeStoreSvc->activeStore();
350 for (auto&& sg : *m_cache[batch]) {
351 // Change active store
352 m_activeStoreSvc->setStore(sg.get());
353 SG::CurrentEventStore::Push reader_sg_ces(sg.get());
354 // Read next event
355 ATH_CHECK(sg->clearStore(true));
356 if (!(m_bkgEventSelector->next(*m_bkg_evt_sel_ctx)).isSuccess()) {
357 ATH_MSG_FATAL("Ran out of minbias events");
358 return StatusCode::FAILURE;
359 }
360 IOpaqueAddress* addr = nullptr;
361 if (!m_bkgEventSelector->createAddress(*m_bkg_evt_sel_ctx, addr)
362 .isSuccess()) {
363 ATH_MSG_WARNING("Failed to create address. No more events?");
364 return StatusCode::FAILURE;
365 }
366 if (addr == nullptr) {
367 ATH_MSG_WARNING("createAddress returned nullptr. No more events?");
368 return StatusCode::FAILURE;
369 }
370 ATH_CHECK(sg->recordAddress(addr));
371 ATH_CHECK(sg->loadEventProxies());
372 // Read data now if desired
373 if (!m_onDemandMB) {
374 for (const auto* proxy_ptr : sg->proxies()) {
375 if (!proxy_ptr->isValid()) {
376 continue;
377 }
378
379 // Sort of a const_cast, then ->accessData()
380 sg->proxy_exact(proxy_ptr->sgkey())->accessData();
381 }
382 }
383 }
384 // Reset active store
385 m_activeStoreSvc->setStore(old_store);
386 if (m_HSBatchSize != 0) {
387 ATH_MSG_INFO(std::format(
388 "Reading {} events took {:%OMm %OSs}", m_cache[batch]->size(),
389 std::chrono::system_clock::now() - start_time));
390 }
391 m_last_loaded_batch.exchange(batch);
392 return StatusCode::SUCCESS;
393 }
394 }
395 return StatusCode::SUCCESS;
396}
#define ATH_CHECK
Evaluate an expression and check for errors.
#define ATH_MSG_FATAL(x)
#define ATH_MSG_INFO(x)
#define ATH_MSG_WARNING(x)
ServiceHandle< IEvtSelector > m_bkgEventSelector
std::map< int, std::unique_ptr< SGHandleArray > > m_cache
Gaudi::Property< bool > m_onDemandMB
int event_to_batch(std::int64_t hs_id)
ServiceHandle< ActiveStoreSvc > m_activeStoreSvc
std::deque< std::unique_ptr< SGHandleArray > > m_empty_caches
std::mutex m_reading_batch_mtx
std::mutex m_empty_caches_mtx
virtual std::int64_t get_hs_id(const EventContext &ctx) const override
Gaudi::Property< int > m_HSBatchSize
std::map< int, std::mutex > m_cache_mtxs
std::size_t calcMBRequired(std::int64_t hs_id, std::size_t slot, unsigned int run, unsigned int lumi, std::uint64_t event)

◆ calcMBRequired()

std::size_t BatchedMinbiasSvc::calcMBRequired ( std::int64_t hs_id,
std::size_t slot,
unsigned int run,
unsigned int lumi,
std::uint64_t event )
private

Definition at line 196 of file BatchedMinbiasSvc.cxx.

200 {
201 const int n_bunches = m_latestDeltaBC.value() - m_earliestDeltaBC.value() + 1;
202 FastReseededPRNG prng{m_seed.value(), hs_id};
203
204 // First apply the beam luminosity SF
205 bool sf_updated_throwaway;
206 const float beam_lumi_sf =
207 m_useBeamLumi ? m_beamLumi->scaleFactor(run, lumi, sf_updated_throwaway)
208 : 1.f;
209 std::vector<float> avg_num_mb_by_bunch(n_bunches,
210 beam_lumi_sf * m_nPerBunch.value());
211 // Now update using beam intensities
212 if (m_useBeamInt) {
213 // Supposed to be once per event, but ends up running once per minbias type
214 // per event now
215 m_beamInt->selectT0(run, event);
216 for (int bunch = m_earliestDeltaBC.value();
217 bunch <= m_latestDeltaBC.value(); ++bunch) {
218 std::size_t idx = bunch - m_earliestDeltaBC.value();
219 avg_num_mb_by_bunch[idx] *= m_beamInt->normFactor(bunch);
220 }
221 }
222
223 std::vector<std::uint64_t>& num_mb_by_bunch = m_num_mb_by_bunch[slot];
224 num_mb_by_bunch.clear();
225 num_mb_by_bunch.resize(n_bunches);
226
227 if (m_usePoisson) {
228 std::transform(avg_num_mb_by_bunch.begin(), avg_num_mb_by_bunch.end(),
229 num_mb_by_bunch.begin(), [&prng](float avg) {
230 return std::poisson_distribution<std::uint64_t>(avg)(prng);
231 });
232 } else {
233 std::transform(avg_num_mb_by_bunch.begin(), avg_num_mb_by_bunch.end(),
234 num_mb_by_bunch.begin(), [](float f) {
235 return static_cast<std::uint64_t>(std::round(f));
236 });
237 }
238
239 std::uint64_t num_mb = ranges::accumulate(num_mb_by_bunch, 0UL);
240 std::vector<std::uint64_t>& index_array = m_idx_lists[slot];
241 const std::uint64_t mbBatchSize = m_MBBatchSize.value();
242 // Prevent running out of events
243 if (num_mb > mbBatchSize) {
244 const int center_bunch = -m_earliestDeltaBC.value();
245 auto indices =
246 rv::iota(0ULL, num_mb_by_bunch.size()) |
247 rv::filter([center_bunch, &num_mb_by_bunch](int idx) {
248 bool good = idx != center_bunch; // filter out the central bunch
249 good =
250 good && num_mb_by_bunch[idx] > 0; // filter out unfilled bunches
251 return good;
252 }) |
253 ranges::to<std::vector>;
254 // sort by distance from central bunch
255 ranges::stable_sort(indices, std::greater{},
256 [center_bunch](std::size_t idx) {
257 return std::size_t(std::abs(int(idx) - center_bunch));
258 });
259 // subtract from bunches until we aren't using too many events
260 for (auto idx : indices) {
261 const std::uint64_t max_to_subtract = num_mb - mbBatchSize;
262 const std::uint64_t num_subtracted =
263 std::min(max_to_subtract, num_mb_by_bunch[idx]);
264 num_mb_by_bunch[idx] -= num_subtracted;
265 num_mb -= num_subtracted;
266 if (num_mb <= mbBatchSize) {
267 break;
268 }
269 }
270 // Print an error anyway so we can fix the job
271 ATH_MSG_ERROR("We need " << num_mb << " events but the batch size is "
272 << mbBatchSize << ". Restricting to "
273 << mbBatchSize << " events!");
274 }
275 index_array = rv::ints(0, int(mbBatchSize)) | rv::sample(num_mb, prng) |
276 ranges::to<std::vector<std::uint64_t>>;
277 ranges::shuffle(index_array, prng);
278 ATH_MSG_DEBUG("HS ID " << hs_id << " uses " << num_mb << " events");
279 // Disabled until C++ 23 range formatting can be used
280 // if (m_HSBatchSize <= 1) {
281 // ATH_MSG_DEBUG(fmt::format("\t\tBy bunch: [{}]\n", fmt::join(num_mb_by_bunch, ", "))
282 // << fmt::format("\t\tOrder: [{}]", fmt::join(index_array, ", ")));
283 // }
284 return num_mb;
285}
#define ATH_MSG_ERROR(x)
#define ATH_MSG_DEBUG(x)
Gaudi::Property< std::uint64_t > m_seed
Gaudi::Property< float > m_nPerBunch
Gaudi::Property< bool > m_useBeamLumi
std::vector< std::vector< std::uint64_t > > m_idx_lists
ServiceHandle< IBeamIntensity > m_beamInt
Gaudi::Property< int > m_earliestDeltaBC
Gaudi::Property< bool > m_usePoisson
std::vector< std::vector< std::uint64_t > > m_num_mb_by_bunch
Gaudi::Property< int > m_latestDeltaBC
Gaudi::Property< bool > m_useBeamInt
Gaudi::Property< int > m_MBBatchSize
ServiceHandle< IBeamLuminosity > m_beamLumi
std::pair< long int, long int > indices
int run(int argc, char *argv[])

◆ endHardScatter()

StatusCode BatchedMinbiasSvc::endHardScatter ( const EventContext & ctx)
override

Definition at line 417 of file BatchedMinbiasSvc.cxx.

417 {
418 using namespace std::chrono_literals;
419 const std::int64_t hs_id = get_hs_id(ctx);
420 const int batch = event_to_batch(hs_id);
421 const int uses = m_batch_use_count[batch]->fetch_add(1) + 1;
422
423 // If we're done with every event in the batch, clear the stores and return
424 // them
425 if (uses == m_HSBatchSize.value()) {
426 std::unique_ptr temp = std::move(m_cache[batch]);
427 m_cache.erase(batch);
428 for (auto&& sg : *temp) {
429 ATH_CHECK(sg->clearStore());
430 }
431 std::lock_guard lg{m_empty_caches_mtx};
432 m_empty_caches.emplace_back(std::move(temp));
433 } else {
434 ATH_MSG_DEBUG("BATCH " << batch << ": " << uses << " uses out of "
435 << m_HSBatchSize << " "
436 << m_actualNHSEventsPerBatch.value()[batch]);
437 }
438 return StatusCode::SUCCESS;
439}
Gaudi::Property< std::vector< int > > m_actualNHSEventsPerBatch
std::vector< std::unique_ptr< std::atomic_int > > m_batch_use_count

◆ event_to_batch()

int BatchedMinbiasSvc::event_to_batch ( std::int64_t hs_id)
private

Definition at line 40 of file BatchedMinbiasSvc.cxx.

40 {
41 return int(hs_id / m_HSBatchSize.value());
42}

◆ get_hs_id()

virtual std::int64_t BatchedMinbiasSvc::get_hs_id ( const EventContext & ctx) const
inlineoverridevirtual

Definition at line 43 of file BatchedMinbiasSvc.h.

43 {
44 return m_skippedHSEvents.value() + ctx.evt();
45 }
Gaudi::Property< int > m_skippedHSEvents

◆ getMinbias()

StoreGateSvc * BatchedMinbiasSvc::getMinbias ( const EventContext & ctx,
std::uint64_t mb_id )
override

Definition at line 398 of file BatchedMinbiasSvc.cxx.

399 {
400 const std::int64_t hs_id = get_hs_id(ctx);
401 const std::size_t slot = ctx.slot();
402 const std::size_t index = m_idx_lists.at(slot).at(mb_id);
403 const int batch = event_to_batch(hs_id);
404 return m_cache[batch]->at(index).get();
405}
str index
Definition DeMoScan.py:362

◆ getNumForBunch()

std::size_t BatchedMinbiasSvc::getNumForBunch ( const EventContext & ctx,
int bunch ) const
override

Definition at line 407 of file BatchedMinbiasSvc.cxx.

408 {
409 if (bunch < m_earliestDeltaBC.value() || bunch > m_latestDeltaBC.value()) {
410 throw std::logic_error(std::format(
411 "Tried to request bunch {} which is outside the range [{}, {}]", bunch,
412 m_earliestDeltaBC.value(), m_latestDeltaBC.value()));
413 }
414 return m_num_mb_by_bunch.at(ctx.slot()).at(bunch - m_earliestDeltaBC.value());
415}

◆ initialize()

StatusCode BatchedMinbiasSvc::initialize ( )
override

AthService initialize.

Definition at line 44 of file BatchedMinbiasSvc.cxx.

44 {
45 ATH_CHECK(m_skipEventIdxSvc.retrieve());
46 ATH_CHECK(m_beamInt.retrieve());
47 ATH_CHECK(m_beamLumi.retrieve());
48 std::size_t n_concurrent =
49 Gaudi::Concurrency::ConcurrencyFlags::numConcurrentEvents();
50 m_idx_lists.clear();
51 m_idx_lists.resize(n_concurrent);
52
53 m_num_mb_by_bunch.clear();
54 m_num_mb_by_bunch.resize(n_concurrent);
55
56 m_cache.clear();
57 m_empty_caches.clear();
58 m_batch_use_count.clear();
59 m_batch_use_count.reserve(m_actualNHSEventsPerBatch.value().size());
60 for (std::size_t i = 0; i < m_actualNHSEventsPerBatch.value().size(); ++i) {
61 m_batch_use_count.emplace_back(std::make_unique<std::atomic_int>(0));
62 }
63 ATH_CHECK(m_bkgEventSelector.retrieve());
64 ATH_CHECK(m_activeStoreSvc.retrieve());
65 // Setup context
66 if (!m_bkgEventSelector->createContext(m_bkg_evt_sel_ctx).isSuccess()) {
67 ATH_MSG_ERROR("Failed to create background event selector context");
68 return StatusCode::FAILURE;
69 }
70 ATH_CHECK(SmartIF<IService>(m_bkgEventSelector.get())->start());
71
72 // Setup proxy provider
73 SmartIF<IProxyProviderSvc> proxyProviderSvc{
74 serviceLocator()->service(std::format("ProxyProviderSvc/BkgPPSvc_{}", name()))
75 };
76 ATH_CHECK(proxyProviderSvc.isValid());
77
78 // Setup Address Providers
79 SmartIF<IAddressProvider> addressProvider{m_bkgEventSelector.get()};
80 if (!addressProvider) {
81 ATH_MSG_WARNING(
82 "Could not cast background event selector to IAddressProvider");
83 } else {
84 proxyProviderSvc->addProvider(addressProvider);
85 }
86 // AthenaPoolAddressProviderSvc
87 SmartIF<IAddressProvider> athPoolAP{
88 serviceLocator()->service(std::format("AthenaPoolAddressProviderSvc/BkgAPAPSvc_{}", name()))
89 };
90 if (!athPoolAP) {
91 ATH_MSG_WARNING(
92 "Could not cast AthenaPoolAddressProviderSvc to IAddressProvider");
93 } else {
94 proxyProviderSvc->addProvider(athPoolAP);
95 }
96 // AddressRemappingSvc
97 SmartIF<IAddressProvider> addRemapAP{
98 serviceLocator()->service(std::format("AddressRemappingSvc/BkgARSvc_{}", name()))
99 };
100 if (!addRemapAP) {
101 ATH_MSG_WARNING("Could not cast AddressRemappingSvc to IAddressProvider");
102 } else {
103 proxyProviderSvc->addProvider(addRemapAP);
104 }
105
106 int mbBatchSize = m_MBBatchSize.value();
107 // setup NSimultaneousBatches vectors of MBBatchSize StoreGates in
108 // m_empty_caches
109 for (int i = 0; i < m_NSimultaneousBatches.value(); ++i) {
110 auto& sgs = m_empty_caches.emplace_back(std::make_unique<SGHandleArray>());
111 sgs->reserve(mbBatchSize);
112 for (int j = 0; j < mbBatchSize; ++j) {
113 // creates / retrieves a different StoreGateSvc for each slot
114 auto& sg = sgs->emplace_back(
115 std::format("StoreGateSvc/StoreGate_{}_{}_{}", name(), i, j), name());
116 ATH_CHECK(sg.retrieve());
117 sg->setStoreID(StoreID::PILEUP_STORE);
118 sg->setProxyProviderSvc(proxyProviderSvc);
119 }
120 }
121
122 // Setup the spare store for event skipping
123 ATH_CHECK(m_spare_store.retrieve());
124 m_spare_store->setStoreID(StoreID::PILEUP_STORE);
125 m_spare_store->setProxyProviderSvc(proxyProviderSvc);
126
127 // Setup the callback for event skipping
128 auto skipEvent_callback = [this, mbBatchSize](
129 ISkipEventIdxSvc::EvtIter begin,
130 ISkipEventIdxSvc::EvtIter end) -> StatusCode {
131 using namespace std::chrono_literals;
132 auto evts = ranges::make_subrange(begin, end);
133 ATH_MSG_INFO("Skipping " << end - begin << " HS events.");
134 auto batches_all =
135 evts | rv::transform([this](const ISkipEventIdxSvc::EvtId& evt) {
136 return event_to_batch(evt.evtIdx);
137 });
138 std::vector<std::tuple<int, int>> batches_with_counts{};
139 // Produce a list of batches, and how many times they appear
140 for (int batch : batches_all) {
141 // First entry
142 if (batches_with_counts.empty()) {
143 batches_with_counts.emplace_back(batch, 1);
144 continue;
145 }
146 // Subsequent entries
147 auto& last_entry = batches_with_counts.back();
148 if (batch == std::get<0>(last_entry)) {
149 std::get<1>(last_entry) += 1;
150 continue;
151 }
152 batches_with_counts.emplace_back(batch, 1);
153 }
154
155 // Discard batches
156 const int hs_batch_size = m_HSBatchSize.value();
157 auto* const old_store = m_activeStoreSvc->activeStore();
158 m_activeStoreSvc->setStore(m_spare_store.get());
159 ATH_CHECK(m_spare_store->clearStore());
160 for (const auto& [batch, count] : batches_with_counts) {
161 if (m_cache.count(batch) != 0) {
162 // batch is currently loaded, just update the use count
163 m_batch_use_count[batch]->fetch_add(count);
164 continue;
165 }
166 // force ordering in background stream
167 while (m_last_loaded_batch < batch - 1) {
168 std::this_thread::sleep_for(50ms);
169 }
170 // if we aren't skipping all the hardscatters in the batch, do nothing
171 if ((m_batch_use_count[batch]->fetch_add(count) + count) <
172 hs_batch_size) {
173 continue;
174 }
175 // otherwise discard the batch
176 ATH_MSG_INFO("Discarding batch " << batch);
177 std::unique_lock lck{m_reading_batch_mtx};
178 if (!m_bkgEventSelector->next(*m_bkg_evt_sel_ctx, mbBatchSize)
179 .isSuccess()) {
180 ATH_MSG_INFO("Ran out of background events");
181 return StatusCode::FAILURE;
182 }
183 // increment counters
184 m_last_loaded_batch.fetch_add(1);
185 }
186 ATH_CHECK(m_spare_store->clearStore());
187 m_activeStoreSvc->setStore(old_store);
188 return StatusCode::SUCCESS;
189 };
190
191 // register callback
192 ATH_CHECK(m_skipEventIdxSvc->registerCallback(skipEvent_callback));
193 return StatusCode::SUCCESS;
194}
ServiceHandle< ISkipEventIdxSvc > m_skipEventIdxSvc
Gaudi::Property< int > m_NSimultaneousBatches
std::vector< EvtId >::const_iterator EvtIter
@ PILEUP_STORE
Definition StoreID.h:31
int count(std::string s, const std::string &regx)
count how many occurances of a regx are in a string
Definition hcg.cxx:146
float j(const xAOD::IParticle &, const xAOD::TrackMeasurementValidation &hit, const Eigen::Matrix3d &jab_inv)

Member Data Documentation

◆ m_activeStoreSvc

ServiceHandle<ActiveStoreSvc> BatchedMinbiasSvc::m_activeStoreSvc
private
Initial value:
{
this, "ActiveStoreSvc", "ActiveStoreSvc", "ActiveStoreSvc"}

Definition at line 97 of file BatchedMinbiasSvc.h.

97 {
98 this, "ActiveStoreSvc", "ActiveStoreSvc", "ActiveStoreSvc"};

◆ m_actualNHSEventsPerBatch

Gaudi::Property<std::vector<int> > BatchedMinbiasSvc::m_actualNHSEventsPerBatch
private
Initial value:
{
this,
"actualNHSEventsPerBatch",
{},
"Dynamic map of actual number of HS events for each batch, in this run."}

Definition at line 83 of file BatchedMinbiasSvc.h.

83 {
84 this,
85 "actualNHSEventsPerBatch",
86 {},
87 "Dynamic map of actual number of HS events for each batch, in this run."};

◆ m_batch_use_count

std::vector<std::unique_ptr<std::atomic_int> > BatchedMinbiasSvc::m_batch_use_count
private

Definition at line 113 of file BatchedMinbiasSvc.h.

◆ m_beamInt

ServiceHandle<IBeamIntensity> BatchedMinbiasSvc::m_beamInt
private
Initial value:
{this, "BeamIntSvc", "FlatBM",
"Beam intensity service"}

Definition at line 93 of file BatchedMinbiasSvc.h.

93 {this, "BeamIntSvc", "FlatBM",
94 "Beam intensity service"};

◆ m_beamLumi

ServiceHandle<IBeamLuminosity> BatchedMinbiasSvc::m_beamLumi
private
Initial value:
{
this, "BeamLumiSvc", "LumiProfileSvc", "Beam luminosity service"}

Definition at line 95 of file BatchedMinbiasSvc.h.

95 {
96 this, "BeamLumiSvc", "LumiProfileSvc", "Beam luminosity service"};

◆ m_bkg_evt_sel_ctx

IEvtSelector::Context* BatchedMinbiasSvc::m_bkg_evt_sel_ctx
private

Definition at line 103 of file BatchedMinbiasSvc.h.

◆ m_bkgEventSelector

ServiceHandle<IEvtSelector> BatchedMinbiasSvc::m_bkgEventSelector
private
Initial value:
{
this, "BkgEventSelector", {}, "Event selector for minbias events"}

Definition at line 91 of file BatchedMinbiasSvc.h.

91 {
92 this, "BkgEventSelector", {}, "Event selector for minbias events"};

◆ m_cache

std::map<int, std::unique_ptr<SGHandleArray> > BatchedMinbiasSvc::m_cache
private

Definition at line 107 of file BatchedMinbiasSvc.h.

◆ m_cache_mtxs

std::map<int, std::mutex> BatchedMinbiasSvc::m_cache_mtxs
private

Definition at line 108 of file BatchedMinbiasSvc.h.

◆ m_earliestDeltaBC

Gaudi::Property<int> BatchedMinbiasSvc::m_earliestDeltaBC
private
Initial value:
{
this, "EarliestDeltaBC", -32,
"Earliest bunch crossing to consider (as delta)"}

Definition at line 77 of file BatchedMinbiasSvc.h.

77 {
78 this, "EarliestDeltaBC", -32,
79 "Earliest bunch crossing to consider (as delta)"};

◆ m_empty_caches

std::deque<std::unique_ptr<SGHandleArray> > BatchedMinbiasSvc::m_empty_caches
private

Definition at line 111 of file BatchedMinbiasSvc.h.

◆ m_empty_caches_mtx

std::mutex BatchedMinbiasSvc::m_empty_caches_mtx
private

Definition at line 112 of file BatchedMinbiasSvc.h.

◆ m_HSBatchSize

Gaudi::Property<int> BatchedMinbiasSvc::m_HSBatchSize
private
Initial value:
{
this, "HSBatchSize", 1,
"Number of HS events per batch (aka max reuse factor)"}

Definition at line 69 of file BatchedMinbiasSvc.h.

69 {
70 this, "HSBatchSize", 1,
71 "Number of HS events per batch (aka max reuse factor)"};

◆ m_idx_lists

std::vector<std::vector<std::uint64_t> > BatchedMinbiasSvc::m_idx_lists
private

Definition at line 106 of file BatchedMinbiasSvc.h.

◆ m_last_loaded_batch

std::atomic_int BatchedMinbiasSvc::m_last_loaded_batch
private

Definition at line 114 of file BatchedMinbiasSvc.h.

◆ m_latestDeltaBC

Gaudi::Property<int> BatchedMinbiasSvc::m_latestDeltaBC
private
Initial value:
{
this, "LatestDeltaBC", +6,
"Latest bunch crossing to consider (as delta)"}

Definition at line 80 of file BatchedMinbiasSvc.h.

80 {
81 this, "LatestDeltaBC", +6,
82 "Latest bunch crossing to consider (as delta)"};

◆ m_MBBatchSize

Gaudi::Property<int> BatchedMinbiasSvc::m_MBBatchSize
private
Initial value:
{
this, "MBBatchSize", 10000,
"Number of low pT minbias events to load per batch"}

Definition at line 63 of file BatchedMinbiasSvc.h.

63 {
64 this, "MBBatchSize", 10000,
65 "Number of low pT minbias events to load per batch"};

◆ m_nPerBunch

Gaudi::Property<float> BatchedMinbiasSvc::m_nPerBunch
private
Initial value:
{
this, "AvgMBPerBunch", 0,
"Average (max) number of minbias events per bunch"}

Definition at line 74 of file BatchedMinbiasSvc.h.

74 {
75 this, "AvgMBPerBunch", 0,
76 "Average (max) number of minbias events per bunch"};

◆ m_NSimultaneousBatches

Gaudi::Property<int> BatchedMinbiasSvc::m_NSimultaneousBatches
private
Initial value:
{
this, "NSimultaneousBatches", 1,
"Max number of batches to load simultaneously"}

Definition at line 66 of file BatchedMinbiasSvc.h.

66 {
67 this, "NSimultaneousBatches", 1,
68 "Max number of batches to load simultaneously"};

◆ m_num_mb_by_bunch

std::vector<std::vector<std::uint64_t> > BatchedMinbiasSvc::m_num_mb_by_bunch
private

Definition at line 105 of file BatchedMinbiasSvc.h.

◆ m_onDemandMB

Gaudi::Property<bool> BatchedMinbiasSvc::m_onDemandMB
private
Initial value:
{
this, "OnDemandMB", false,
"Should minbias event contents be read on demand"}

Definition at line 53 of file BatchedMinbiasSvc.h.

53 {
54 this, "OnDemandMB", false,
55 "Should minbias event contents be read on demand"};

◆ m_reading_batch_mtx

std::mutex BatchedMinbiasSvc::m_reading_batch_mtx
private

Definition at line 110 of file BatchedMinbiasSvc.h.

◆ m_seed

Gaudi::Property<std::uint64_t> BatchedMinbiasSvc::m_seed
private
Initial value:
{this, "Seed", 0,
"Additional seed for PRNGs"}

Definition at line 51 of file BatchedMinbiasSvc.h.

51 {this, "Seed", 0,
52 "Additional seed for PRNGs"};

◆ m_skipEventIdxSvc

ServiceHandle<ISkipEventIdxSvc> BatchedMinbiasSvc::m_skipEventIdxSvc
private
Initial value:
{
this, "SkipEvtIdxSvc", "SkipEventIdxSvc",
"Skipped event index (run / lb num) provider"}

Definition at line 88 of file BatchedMinbiasSvc.h.

88 {
89 this, "SkipEvtIdxSvc", "SkipEventIdxSvc",
90 "Skipped event index (run / lb num) provider"};

◆ m_skippedHSEvents

Gaudi::Property<int> BatchedMinbiasSvc::m_skippedHSEvents
private
Initial value:
{this, "SkippedHSEvents", 0,
"Number of skipped HS events"}

Definition at line 72 of file BatchedMinbiasSvc.h.

72 {this, "SkippedHSEvents", 0,
73 "Number of skipped HS events"};

◆ m_spare_store

SGHandle BatchedMinbiasSvc::m_spare_store
private
Initial value:
{this, "StoreGateSvc",
std::format("StoreGateSvc/discards_{}", name()),
"StoreGate for discarding events"}

Definition at line 100 of file BatchedMinbiasSvc.h.

100 {this, "StoreGateSvc",
101 std::format("StoreGateSvc/discards_{}", name()),
102 "StoreGate for discarding events"};

◆ m_useBeamInt

Gaudi::Property<bool> BatchedMinbiasSvc::m_useBeamInt
private
Initial value:
{
this, "UseBeamInt", true, "Whether to use the beam intensity service"}

Definition at line 59 of file BatchedMinbiasSvc.h.

59 {
60 this, "UseBeamInt", true, "Whether to use the beam intensity service"};

◆ m_useBeamLumi

Gaudi::Property<bool> BatchedMinbiasSvc::m_useBeamLumi
private
Initial value:
{
this, "UseBeamLumi", true, "Whether to use the beam luminosity service"}

Definition at line 61 of file BatchedMinbiasSvc.h.

61 {
62 this, "UseBeamLumi", true, "Whether to use the beam luminosity service"};

◆ m_usePoisson

Gaudi::Property<bool> BatchedMinbiasSvc::m_usePoisson
private
Initial value:
{this, "UsePoisson", true,
"Whether to use a Poisson distribution "
"(if False, use a delta distribution)"}

Definition at line 56 of file BatchedMinbiasSvc.h.

56 {this, "UsePoisson", true,
57 "Whether to use a Poisson distribution "
58 "(if False, use a delta distribution)"};

The documentation for this class was generated from the following files: