ATLAS Offline Software
BatchedMinbiasSvc.cxx
/*
  Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
*/

#include "BatchedMinbiasSvc.h"

#include <GaudiKernel/ConcurrencyFlags.h>
#include <fmt/chrono.h>
#include <fmt/format.h>

#include <algorithm>
#include <boost/core/demangle.hpp>
#include <chrono>
#include <cmath>
#include <random>
#include <range/v3/algorithm/stable_sort.hpp>
#include <range/v3/numeric/accumulate.hpp>
#include <range/v3/to_container.hpp>
#include <range/v3/view.hpp>
#include <thread>

#include "AthenaKernel/IAddressProvider.h"
#include "AthenaKernel/IProxyProviderSvc.h"
#include "EventInfo/EventID.h"
#include "EventInfo/EventInfo.h"
#include "FastReseededPRNG.h"
#include "SGTools/CurrentEventStore.h"

namespace rv = ranges::views;

inline std::string CLIDToString(const CLID& clid) {
  return boost::core::demangle(CLIDRegistry::CLIDToTypeinfo(clid)->name());
}

BatchedMinbiasSvc::BatchedMinbiasSvc(const std::string& name, ISvcLocator* svc)
    : base_class(name, svc),
      m_bkg_evt_sel_ctx(nullptr),
      m_last_loaded_batch() {}

BatchedMinbiasSvc::~BatchedMinbiasSvc() {}
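
// Hard scatters are grouped into fixed-size batches that all pile up on the
// same set of minbias events. The mapping is plain integer division: with
// HSBatchSize = 4, hard-scatter IDs 0-3 map to batch 0, IDs 4-7 to batch 1,
// and so on.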
int BatchedMinbiasSvc::event_to_batch(std::int64_t hs_id) {
  return int(hs_id / m_HSBatchSize.value());
}
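
// initialize() wires up everything needed before the first event: per-slot
// bookkeeping arrays, the background event selector and its context, a
// dedicated proxy/address-provider chain, NSimultaneousBatches sets of
// MBBatchSize pile-up stores, and a callback that keeps the background stream
// in sync when hard-scatter events are skipped.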
StatusCode BatchedMinbiasSvc::initialize() {
  ATH_CHECK(m_skipEventIdxSvc.retrieve());
  ATH_CHECK(m_beamInt.retrieve());
  ATH_CHECK(m_beamLumi.retrieve());
  std::size_t n_concurrent =
      Gaudi::Concurrency::ConcurrencyFlags::numConcurrentEvents();
  m_idx_lists.clear();
  m_idx_lists.resize(n_concurrent);

  m_num_mb_by_bunch.clear();
  m_num_mb_by_bunch.resize(n_concurrent);

  m_cache.clear();
  m_empty_caches.clear();
  m_batch_use_count.clear();
  m_batch_use_count.reserve(m_actualNHSEventsPerBatch.value().size());
  for (std::size_t i = 0; i < m_actualNHSEventsPerBatch.value().size(); ++i) {
    m_batch_use_count.emplace_back(std::make_unique<std::atomic_int>(0));
  }
  ATH_CHECK(m_bkgEventSelector.retrieve());
  ATH_CHECK(m_activeStoreSvc.retrieve());
  // Setup context
  if (!m_bkgEventSelector->createContext(m_bkg_evt_sel_ctx).isSuccess()) {
    ATH_MSG_ERROR("Failed to create background event selector context");
    return StatusCode::FAILURE;
  }
  ATH_CHECK(SmartIF<IService>(m_bkgEventSelector.get())->start());

  // Setup proxy provider
  SmartIF<IProxyProviderSvc> proxyProviderSvc{serviceLocator()->service(
      fmt::format("ProxyProviderSvc/BkgPPSvc_{}", name()))};
  ATH_CHECK(proxyProviderSvc.isValid());

  // Setup Address Providers
  SmartIF<IAddressProvider> addressProvider{m_bkgEventSelector.get()};
  if (!addressProvider) {
    ATH_MSG_WARNING(
        "Could not cast background event selector to IAddressProvider");
  } else {
    proxyProviderSvc->addProvider(addressProvider);
  }
  // AthenaPoolAddressProviderSvc
  SmartIF<IAddressProvider> athPoolAP{serviceLocator()->service(
      fmt::format("AthenaPoolAddressProviderSvc/BkgAPAPSvc_{}", name()))};
  if (!athPoolAP) {
    ATH_MSG_WARNING(
        "Could not cast AthenaPoolAddressProviderSvc to IAddressProvider");
  } else {
    proxyProviderSvc->addProvider(athPoolAP);
  }
  // AddressRemappingSvc
  SmartIF<IAddressProvider> addRemapAP{serviceLocator()->service(
      fmt::format("AddressRemappingSvc/BkgARSvc_{}", name()))};
  if (!addRemapAP) {
    ATH_MSG_WARNING("Could not cast AddressRemappingSvc to IAddressProvider");
  } else {
    proxyProviderSvc->addProvider(addRemapAP);
  }
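  // The providers registered above let the pile-up stores fault in data on
  // demand: the event selector supplies the event-level addresses, the
  // AthenaPool provider resolves them from the input files, and the remapping
  // service applies any configured key renames.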

  int mbBatchSize = m_MBBatchSize.value();
  // setup NSimultaneousBatches vectors of MBBatchSize StoreGates in
  // m_empty_caches
  for (int i = 0; i < m_NSimultaneousBatches.value(); ++i) {
    auto& sgs = m_empty_caches.emplace_back(std::make_unique<SGHandleArray>());
    sgs->reserve(mbBatchSize);
    for (int j = 0; j < mbBatchSize; ++j) {
      // creates / retrieves a different StoreGateSvc for each slot
      auto& sg = sgs->emplace_back(
          fmt::format("StoreGateSvc/StoreGate_{}_{}_{}", name(), i, j), name());
      ATH_CHECK(sg.retrieve());
      sg->setStoreID(StoreID::PILEUP_STORE);
      sg->setProxyProviderSvc(proxyProviderSvc);
    }
  }
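  // Each store is a uniquely named service instance (e.g.
  // "StoreGateSvc/StoreGate_BatchedMinbiasSvc_0_5" when this service is named
  // BatchedMinbiasSvc), so batches can be filled and cleared independently.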

  // Setup the spare store for event skipping
  ATH_CHECK(m_spare_store.retrieve());
  m_spare_store->setStoreID(StoreID::PILEUP_STORE);
  m_spare_store->setProxyProviderSvc(proxyProviderSvc);

  // Setup the callback for event skipping
  auto skipEvent_callback = [this, mbBatchSize](
                                ISkipEventIdxSvc::EvtIter begin,
                                ISkipEventIdxSvc::EvtIter end) -> StatusCode {
    using namespace std::chrono_literals;
    auto evts = ranges::make_subrange(begin, end);
    ATH_MSG_INFO("Skipping " << end - begin << " HS events.");
    auto batches_all =
        evts | rv::transform([this](const ISkipEventIdxSvc::EvtId& evt) {
          return event_to_batch(evt.evtIdx);
        });
    std::vector<std::tuple<int, int>> batches_with_counts{};
    // Produce a list of batches, and how many times each appears
    for (int batch : batches_all) {
      // First entry
      if (batches_with_counts.empty()) {
        batches_with_counts.emplace_back(batch, 1);
        continue;
      }
      // Subsequent entries
      auto& last_entry = batches_with_counts.back();
      if (batch == std::get<0>(last_entry)) {
        std::get<1>(last_entry) += 1;
        continue;
      }
      batches_with_counts.emplace_back(batch, 1);
    }
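    // This is a run-length encoding of consecutive equal batch numbers:
    // e.g. batch IDs {0, 0, 0, 1, 2, 2} become {(0, 3), (1, 1), (2, 2)}.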

    // Discard batches
    const int hs_batch_size = m_HSBatchSize.value();
    auto* const old_store = m_activeStoreSvc->activeStore();
    m_activeStoreSvc->setStore(m_spare_store.get());
    ATH_CHECK(m_spare_store->clearStore());
    for (const auto& [batch, count] : batches_with_counts) {
      if (m_cache.count(batch) != 0) {
        // batch is currently loaded, just update the use count
        m_batch_use_count[batch]->fetch_add(count);
        continue;
      }
      // force ordering in background stream
      while (m_last_loaded_batch < batch - 1) {
        std::this_thread::sleep_for(50ms);
      }
      // if we aren't skipping all the hard scatters in the batch, do nothing
      if ((m_batch_use_count[batch]->fetch_add(count) + count) <
          hs_batch_size) {
        continue;
      }
      // otherwise discard the batch
      ATH_MSG_INFO("Discarding batch " << batch);
      std::unique_lock lck{m_reading_batch_mtx};
      if (!m_bkgEventSelector->next(*m_bkg_evt_sel_ctx, mbBatchSize)
               .isSuccess()) {
        ATH_MSG_INFO("Ran out of background events");
        return StatusCode::FAILURE;
      }
      // increment counters
      m_last_loaded_batch.fetch_add(1);
    }
    ATH_CHECK(m_spare_store->clearStore());
    m_activeStoreSvc->setStore(old_store);
    return StatusCode::SUCCESS;
  };

  // register callback
  ATH_CHECK(m_skipEventIdxSvc->registerCallback(skipEvent_callback));
  return StatusCode::SUCCESS;
}
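
// calcMBRequired() works out how many minbias events a given hard scatter
// needs and fills the per-slot bookkeeping arrays. Per-bunch averages start
// from nPerBunch, are scaled by the beam luminosity and per-bunch beam
// intensity when enabled, then are either Poisson-fluctuated or rounded. The
// total is clamped to the batch size, and a random subset of the batch's
// store indices is drawn for this event.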
std::size_t BatchedMinbiasSvc::calcMBRequired(std::int64_t hs_id,
                                              std::size_t slot,
                                              unsigned int run,
                                              unsigned int lumi,
                                              std::uint64_t event) {
  const int n_bunches = m_latestDeltaBC.value() - m_earliestDeltaBC.value() + 1;
  FastReseededPRNG prng{m_seed.value(), hs_id};

  // First apply the beam luminosity SF
  bool sf_updated_throwaway;
  const float beam_lumi_sf =
      m_useBeamLumi ? m_beamLumi->scaleFactor(run, lumi, sf_updated_throwaway)
                    : 1.f;
  std::vector<float> avg_num_mb_by_bunch(n_bunches,
                                         beam_lumi_sf * m_nPerBunch.value());
  // Now update using beam intensities
  if (m_useBeamInt) {
    // Supposed to be once per event, but ends up running once per minbias type
    // per event now
    m_beamInt->selectT0(run, event);
    for (int bunch = m_earliestDeltaBC.value();
         bunch <= m_latestDeltaBC.value(); ++bunch) {
      std::size_t idx = bunch - m_earliestDeltaBC.value();
      avg_num_mb_by_bunch[idx] *= m_beamInt->normFactor(bunch);
    }
  }

  std::vector<std::uint64_t>& num_mb_by_bunch = m_num_mb_by_bunch[slot];
  num_mb_by_bunch.clear();
  num_mb_by_bunch.resize(n_bunches);

  if (m_usePoisson) {
    std::transform(avg_num_mb_by_bunch.begin(), avg_num_mb_by_bunch.end(),
                   num_mb_by_bunch.begin(), [&prng](float avg) {
                     return std::poisson_distribution<std::uint64_t>(avg)(prng);
                   });
  } else {
    std::transform(avg_num_mb_by_bunch.begin(), avg_num_mb_by_bunch.end(),
                   num_mb_by_bunch.begin(), [](float f) {
                     return static_cast<std::uint64_t>(std::round(f));
                   });
  }
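
  // Note: the PRNG was seeded with (seed, hs_id) only, so the per-bunch
  // counts above and the index sampling below are reproducible for a given
  // hard scatter, independent of which slot runs it.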
  std::uint64_t num_mb = ranges::accumulate(num_mb_by_bunch, 0UL);
  std::vector<std::uint64_t>& index_array = m_idx_lists[slot];
  const std::uint64_t mbBatchSize = m_MBBatchSize.value();
  // Prevent running out of events
  if (num_mb > mbBatchSize) {
    const std::uint64_t num_mb_requested = num_mb;
    const int center_bunch = -m_earliestDeltaBC.value();
    auto indices =
        rv::iota(0ULL, num_mb_by_bunch.size()) |
        rv::filter([center_bunch, &num_mb_by_bunch](int idx) {
          bool good = idx != center_bunch;          // filter out the central bunch
          good = good && num_mb_by_bunch[idx] > 0;  // filter out unfilled bunches
          return good;
        }) |
        ranges::to<std::vector>;
    // sort by distance from central bunch, farthest first
    ranges::stable_sort(indices, std::greater{},
                        [center_bunch](std::size_t idx) {
                          return std::size_t(std::abs(int(idx) - center_bunch));
                        });
    // subtract from bunches until we aren't using too many events
    for (auto idx : indices) {
      const std::uint64_t max_to_subtract = num_mb - mbBatchSize;
      const std::uint64_t num_subtracted =
          std::min(max_to_subtract, num_mb_by_bunch[idx]);
      num_mb_by_bunch[idx] -= num_subtracted;
      num_mb -= num_subtracted;
      if (num_mb <= mbBatchSize) {
        break;
      }
    }
    // Print an error so the job configuration can be fixed
    ATH_MSG_ERROR("We need " << num_mb_requested
                             << " events but the batch size is " << mbBatchSize
                             << ". Restricting to " << mbBatchSize
                             << " events!");
  }
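
  // Example of the clamp above: with MBBatchSize = 100 and per-bunch counts
  // summing to 104, the 4 excess events are trimmed from the filled bunches
  // farthest from the central (in-time) bunch, which is itself never trimmed.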
  index_array = rv::ints(0, int(mbBatchSize)) | rv::sample(num_mb, prng) |
                ranges::to<std::vector<std::uint64_t>>;
  ranges::shuffle(index_array, prng);
  if (m_HSBatchSize > 1) {
    ATH_MSG_DEBUG("HS ID " << hs_id << " uses " << num_mb << " events");
  } else {
    ATH_MSG_DEBUG("HS ID " << hs_id << " uses " << num_mb << " events\n"
                  << fmt::format("\t\tBy bunch: [{}]\n",
                                 fmt::join(num_mb_by_bunch, ", "))
                  << fmt::format("\t\tOrder: [{}]",
                                 fmt::join(index_array, ", ")));
  }
  return num_mb;
}
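
// beginHardScatter() ensures the minbias batch for this hard scatter is in
// memory, loading it into a free cache if necessary. Batches are loaded
// strictly in order, and the mutexes guarantee each batch is read from the
// background stream exactly once even when several slots request it at the
// same time.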
StatusCode BatchedMinbiasSvc::beginHardScatter(const EventContext& ctx) {
  using namespace std::chrono_literals;
  bool first_wait = true;
  std::chrono::steady_clock::time_point cache_wait_start{};
  std::chrono::steady_clock::time_point order_wait_start{};
  const std::int64_t hs_id = get_hs_id(ctx);
  const int batch = event_to_batch(hs_id);
  calcMBRequired(hs_id, ctx.slot(),
                 ctx.eventID().run_number(),   // don't need the total, only
                 ctx.eventID().lumi_block(),   // need to populate the arrays
                 ctx.eventID().event_number());
  while (true) {
    if (m_cache.count(batch) != 0) {
      // batch already loaded
      // mutex prevents returning when batch is partially loaded
      m_cache_mtxs[batch].lock();
      m_cache_mtxs[batch].unlock();
      return StatusCode::SUCCESS;
    }
    // prevent batches loading out-of-order
    if (m_last_loaded_batch < (batch - 1)) {
      ATH_MSG_INFO("Waiting to prevent out-of-order loading of batches");
      order_wait_start = std::chrono::steady_clock::now();
      while (m_last_loaded_batch < (batch - 1)) {
        std::this_thread::sleep_for(50ms);
      }
      auto wait_time = std::chrono::steady_clock::now() - order_wait_start;
      ATH_MSG_INFO(fmt::format(
          "Waited {:%M:%S} to prevent out-of-order loading", wait_time));
    }
    // See if there are any free caches
    // Using try_lock here to avoid reading same batch twice
    if (m_empty_caches_mtx.try_lock()) {
      if (m_empty_caches.empty()) {
        // Unlock mutex if we got the lock but there were no free caches
        m_empty_caches_mtx.unlock();
        if (first_wait) {
          ATH_MSG_INFO("Waiting for a free cache");
          first_wait = false;
          cache_wait_start = std::chrono::steady_clock::now();
        }
        // Wait 100ms then try again
        std::this_thread::sleep_for(100ms);
        continue;
      }
      if (!first_wait) {
        auto wait_time = std::chrono::steady_clock::now() - cache_wait_start;
        ATH_MSG_INFO(
            fmt::format("Waited {:%M:%S} for a free cache", wait_time));
      }
      std::scoped_lock reading{m_cache_mtxs[batch], m_reading_batch_mtx};
      if (m_HSBatchSize != 0) {
        ATH_MSG_INFO("Reading next batch in event " << ctx.evt() << ", slot "
                                                    << ctx.slot() << " (hs_id "
                                                    << hs_id << ")");
      }
      auto start_time = std::chrono::system_clock::now();
      m_cache[batch] = std::move(m_empty_caches.front());
      m_empty_caches.pop_front();
      // Remember old store to reset later
      auto* old_store = m_activeStoreSvc->activeStore();
      for (auto&& sg : *m_cache[batch]) {
        // Change active store
        m_activeStoreSvc->setStore(sg.get());
        SG::CurrentEventStore::Push reader_sg_ces(sg.get());
        // Read next event
        ATH_CHECK(sg->clearStore(true));
        if (!(m_bkgEventSelector->next(*m_bkg_evt_sel_ctx)).isSuccess()) {
          ATH_MSG_FATAL("Ran out of minbias events");
          return StatusCode::FAILURE;
        }
        IOpaqueAddress* addr = nullptr;
        if (!m_bkgEventSelector->createAddress(*m_bkg_evt_sel_ctx, addr)
                 .isSuccess()) {
          ATH_MSG_WARNING("Failed to create address. No more events?");
          return StatusCode::FAILURE;
        }
        if (addr == nullptr) {
          ATH_MSG_WARNING("createAddress returned nullptr. No more events?");
          return StatusCode::FAILURE;
        }
        ATH_CHECK(sg->recordAddress(addr));
        ATH_CHECK(sg->loadEventProxies());
        // Read data now if desired
        for (const auto* proxy_ptr : sg->proxies()) {
          if (!proxy_ptr->isValid()) {
            continue;
          }

          if (!m_onDemandMB) {
            // Sort of a const_cast, then ->accessData()
            sg->proxy_exact(proxy_ptr->sgkey())->accessData();
          }
        }
      }
      // Reset active store
      m_activeStoreSvc->setStore(old_store);
      if (m_HSBatchSize != 0) {
        ATH_MSG_INFO(fmt::format(
            "Reading {} events took {:%OMm %OSs}", m_cache[batch]->size(),
            std::chrono::system_clock::now() - start_time));
      }
      m_empty_caches_mtx.unlock();
      m_last_loaded_batch.exchange(batch);
      return StatusCode::SUCCESS;
    }
  }
  return StatusCode::SUCCESS;
}
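
// getMinbias() hands out one of the batch's preloaded stores: the per-slot
// index list filled by calcMBRequired() maps the caller's minbias ID onto a
// store within the batch cache.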
StoreGateSvc* BatchedMinbiasSvc::getMinbias(const EventContext& ctx,
                                            std::uint64_t mb_id) {
  const std::int64_t hs_id = get_hs_id(ctx);
  const std::size_t slot = ctx.slot();
  const std::size_t index = m_idx_lists.at(slot).at(mb_id);
  const int batch = event_to_batch(hs_id);
  return m_cache[batch]->at(index).get();
}

std::size_t BatchedMinbiasSvc::getNumForBunch(const EventContext& ctx,
                                              int bunch) const {
  if (bunch < m_earliestDeltaBC.value() || bunch > m_latestDeltaBC.value()) {
    throw std::logic_error(fmt::format(
        "Tried to request bunch {} which is outside the range [{}, {}]", bunch,
        m_earliestDeltaBC.value(), m_latestDeltaBC.value()));
  }
  return m_num_mb_by_bunch.at(ctx.slot()).at(bunch - m_earliestDeltaBC.value());
}
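
// endHardScatter() releases this hard scatter's hold on its batch. The use
// count is bumped once per hard scatter; the last user (use count reaching
// HSBatchSize) clears the stores and returns the cache to the free list.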
StatusCode BatchedMinbiasSvc::endHardScatter(const EventContext& ctx) {
  using namespace std::chrono_literals;
  const std::int64_t hs_id = get_hs_id(ctx);
  const int batch = event_to_batch(hs_id);
  const int uses = m_batch_use_count[batch]->fetch_add(1) + 1;

  // If we're done with every event in the batch, clear the stores and return
  // them
  if (uses == m_HSBatchSize.value()) {
    std::unique_ptr temp = std::move(m_cache[batch]);
    m_cache.erase(batch);
    for (auto&& sg : *temp) {
      ATH_CHECK(sg->clearStore());
    }
    std::lock_guard lg{m_empty_caches_mtx};
    m_empty_caches.emplace_back(std::move(temp));
  } else {
    ATH_MSG_DEBUG("BATCH " << batch << ": " << uses << " uses out of "
                           << m_HSBatchSize << " "
                           << m_actualNHSEventsPerBatch);
  }
  return StatusCode::SUCCESS;
}