ATLAS Offline Software
MPIHiveEventLoopMgr.cxx
/*
  Copyright (C) 2002-2025 CERN for the benefit of the ATLAS collaboration
*/
#include "MPIHiveEventLoopMgr.h"

// Gaudi includes
#include "GaudiKernel/AppReturnCode.h"

// Utilities
#include "AthCheckMacros.h"   // [reconstructed]
#include "ClusterMessage.h"   // [reconstructed]

// Standard Library
#include <chrono>
#include <format>  // for the std::format calls below (added; not in the original include list)
#include <fstream>
#include <string>

using Clock = std::chrono::high_resolution_clock;
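
// Overview: rank 0 acts as the master and only hands out event indices; every
// other rank is a worker that requests events from the master over MPI and
// runs them through its local Gaudi Hive scheduler. At the end of the job each
// worker reports how many events it created, skipped, and finished.
//
// NB: lines tagged "[reconstructed]" below were dropped from this listing and
// have been restored from context (the receiving side of each message and the
// declarations in the header); their exact original form is not guaranteed.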

// Standard Constructor
MPIHiveEventLoopMgr::MPIHiveEventLoopMgr(const std::string& name,
                                         ISvcLocator* svcLoc)
    : AthenaHiveEventLoopMgr(name, svcLoc) {}

// Standard Destructor
MPIHiveEventLoopMgr::~MPIHiveEventLoopMgr() = default;  // [reconstructed]

StatusCode MPIHiveEventLoopMgr::initialize() {
  // Initialize cluster svc
  ATH_CHECK(m_clusterSvc.retrieve());
  return AthenaHiveEventLoopMgr::initialize();  // [reconstructed]
}

StatusCode MPIHiveEventLoopMgr::finalize() {
  // [reconstructed]: the finalize body was elided in this listing; plain
  // delegation to the base class is assumed
  return AthenaHiveEventLoopMgr::finalize();
}

StatusCode MPIHiveEventLoopMgr::nextEvent(int maxevt) {
  // make nextEvent(0) a dummy call
  if (0 == maxevt) {
    return StatusCode::SUCCESS;
  }

  // Reset the application return code.
  Gaudi::setAppReturnCode(m_appMgrProperty, Gaudi::ReturnCode::Success, true)
      .ignore();
  ATH_MSG_INFO("Starting loop on events");

  if (m_clusterSvc->rank() == 0) {
    return masterEventLoop(maxevt);
  }
  return workerEventLoop();
}
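
// Master side of the message protocol: each RequestEvent from a worker is
// answered with a ProvideEvent carrying the next event index. A WorkerError
// causes an EmergencyStop to be sent to every worker still running. Once all
// events have been handed out, further requests are answered with EventsDone,
// and the master waits for a FinalWorkerStatus from every worker.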

StatusCode MPIHiveEventLoopMgr::masterEventLoop(int maxEvt) {
  ATH_MSG_INFO("Running with " << m_clusterSvc->numRanks() << " ranks");
  // Determine number of events to process
  int skipEvts = int(m_firstEventIndex.value());
  if (m_evtSelector != nullptr) {
    int evt = size();
    if (evt == -1) {
      m_clusterSvc->abort();
      return StatusCode::FAILURE;
    }
    if (maxEvt < 0 || maxEvt > evt) {
      maxEvt = evt;
    }
    ATH_MSG_INFO("Will be processing " << maxEvt << " events");
  }

  // Setup worker status DB (with a spare entry at the start)
  std::vector<bool> workers_done(m_clusterSvc->numRanks(), false);
  // Mark rank 0 done from the start: it is the master, not a worker
  workers_done[0] = true;
  int num_workers_done = 1;  // Init to 1 so we can compare to numRanks
  std::vector<ClusterMessage::WorkerStatus> statuses(m_clusterSvc->numRanks());

  // Entering event loop
  m_clusterSvc->barrier();
  // Note: no ++evt in the loop header. This is really a message loop, and we
  // don't want to increment evt unless we have actually provided an event.
  auto start = Clock::now();
  for (int evt = skipEvts; evt < skipEvts + maxEvt;) {
    ClusterMessage msg = m_clusterSvc->waitReceiveMessage();
    // Messages we can get are RequestEvent, FinalWorkerStatus, or WorkerError
    if (msg.messageType == ClusterMessageType::RequestEvent) {
      ATH_MSG_DEBUG("Starting event " << evt << " on " << msg.source);
      m_clusterSvc->sendMessage(
          msg.source, ClusterMessage{.messageType = ClusterMessageType::ProvideEvent,
                                     .payload = evt});  // [reconstructed]
      ++evt;
      continue;
    }

    if (msg.messageType == ClusterMessageType::WorkerError) {
      ATH_MSG_ERROR("Received WorkerError message from " << msg.source);
      statuses.at(msg.source) = get<ClusterMessage::WorkerStatus>(msg.payload);
      // If a worker hits an error, it's done
      workers_done.at(msg.source) = true;
      ++num_workers_done;
      for (int i = 1; i < m_clusterSvc->numRanks(); ++i) {
        if (!workers_done.at(i)) {
          // Tell workers that aren't done to emergency stop
          m_clusterSvc->sendMessage(
              i, ClusterMessage{.messageType =
                                    ClusterMessageType::EmergencyStop});  // [reconstructed]
          workers_done[i] = true;
          ++num_workers_done;
        }
      }
      break;
    }

    if (msg.messageType == ClusterMessageType::FinalWorkerStatus) {
      ATH_MSG_INFO("Received FinalWorkerStatus from " << msg.source);
      statuses.at(msg.source) = get<ClusterMessage::WorkerStatus>(msg.payload);
      workers_done.at(msg.source) = true;  // Worker hit end of stream
      ++num_workers_done;
      continue;
    }

    // Other message types are an error
    ATH_MSG_ERROR("Received unexpected message "
                  << std::format("{}", msg.messageType) << " from "
                  << msg.source);
  }
  auto all_provided = Clock::now() - start;
  ATH_MSG_INFO("Provided all events to workers, waiting for them to complete.");
  // Event loop done, tell remaining workers
  while (num_workers_done < m_clusterSvc->numRanks()) {
    ClusterMessage msg = m_clusterSvc->waitReceiveMessage();
    // Messages we can get are RequestEvent, FinalWorkerStatus, or WorkerError
    if (msg.messageType == ClusterMessageType::RequestEvent) {
      m_clusterSvc->sendMessage(
          msg.source, ClusterMessage{.messageType =
                                         ClusterMessageType::EventsDone});  // [reconstructed]
      continue;
    }

    if (msg.messageType == ClusterMessageType::WorkerError) {
      ATH_MSG_ERROR("Received WorkerError message from " << msg.source);
      statuses.at(msg.source) = get<ClusterMessage::WorkerStatus>(msg.payload);
      // If a worker hits an error, it's done
      workers_done.at(msg.source) = true;
      ++num_workers_done;
      for (int i = 1; i < m_clusterSvc->numRanks(); ++i) {
        if (!workers_done.at(i)) {
          // Tell workers that aren't done to emergency stop
          m_clusterSvc->sendMessage(
              i, ClusterMessage{.messageType =
                                    ClusterMessageType::EmergencyStop});  // [reconstructed]
          workers_done[i] = true;
          ++num_workers_done;
        }
      }
      break;
    }

    if (msg.messageType == ClusterMessageType::FinalWorkerStatus) {
      ATH_MSG_INFO("Received FinalWorkerStatus from " << msg.source);
      statuses.at(msg.source) = get<ClusterMessage::WorkerStatus>(msg.payload);
      workers_done.at(msg.source) = true;  // Worker acknowledged that we're done
      ++num_workers_done;
      continue;
    }

    // Other message types are an error
    ATH_MSG_ERROR("Received unexpected message "
                  << std::format("{}", msg.messageType) << " from "
                  << msg.source);
  }
  auto all_done = Clock::now() - start;
  // Collate status
  int n_created = 0;
  int n_skipped = 0;
  int n_finished = 0;

  StatusCode sc = StatusCode::SUCCESS;
  int worker_idx = 0;
  for (const auto& worker_status : statuses) {
    // Propagate any real worker failure (StatusCode(9999) is treated as a
    // benign sentinel here and does not override the return code)
    if (worker_status.status.isFailure() &&
        worker_status.status != StatusCode(9999)) {
      sc = worker_status.status;
    }
    n_created += worker_status.createdEvents;
    n_skipped += worker_status.skippedEvents;
    n_finished += worker_status.finishedEvents;

    // Skip rank 0: it is the master and has no worker status. (The increment
    // is kept outside the condition so the log line reports the actual rank.)
    if (worker_idx != 0) {
      ATH_MSG_INFO("Worker " << worker_idx << ": SC " << worker_status.status
                             << ", created " << worker_status.createdEvents
                             << ", skipped " << worker_status.skippedEvents
                             << ", finished " << worker_status.finishedEvents);
    }
    ++worker_idx;
  }

  ATH_MSG_INFO("Overall: SC " << sc << ", created " << n_created << ", skipped "
                              << n_skipped << ", finished " << n_finished);
  ATH_MSG_INFO("MASTER: Took " << std::chrono::hh_mm_ss(all_provided)
                               << " to provide all events.");
  ATH_MSG_INFO("MASTER: Took " << std::chrono::hh_mm_ss(all_done)
                               << " to complete all events.");
  return sc;
}
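
// Worker side: request one event index at a time from the master, feed it to
// the local scheduler via insertEvent(), and drain finished events whenever
// slots run short. A FinalWorkerStatus (or WorkerError) is sent to the master
// before returning.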

StatusCode MPIHiveEventLoopMgr::workerEventLoop() {
  bool end_of_stream = false;
  // barrier so all ranks enter message loop together
  m_clusterSvc->barrier();
  auto start = Clock::now();
  while (true) {
    // Drain the scheduler (wait for at least one event to complete, then free
    // up completed slots) in two circumstances:
    // 1. We have created exactly one event, so the first event runs to
    //    completion before any more are scheduled
    // 2. There are no free slots left
    bool haveFreeSlots =
        m_schedulerSvc->freeSlots() > 0 && m_whiteboard->freeSlots() > 0;
    if (!haveFreeSlots || m_nLocalCreatedEvts == 1) {
      StatusCode sc = drainLocalScheduler();  // [reconstructed]
      if (sc.isFailure()) {
        ClusterMessage::WorkerStatus status{};  // [reconstructed]
        status.status = sc;
        status.createdEvents = m_nLocalCreatedEvts;
        status.skippedEvents = m_nLocalSkippedEvts;
        status.finishedEvents = m_nLocalFinishedEvts;
        m_clusterSvc->sendMessage(
            0, ClusterMessage{.messageType = ClusterMessageType::WorkerError,
                              .payload = status});  // [reconstructed]
        return sc;
      }
    }

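    // A new event is only requested once a slot is free, so a slow worker
    // naturally throttles the rate at which it asks the master for work.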
    auto start_time = Clock::now();
    m_clusterSvc->sendMessage(
        0, ClusterMessage{.messageType =
                              ClusterMessageType::RequestEvent});  // [reconstructed]
    ClusterMessage msg = m_clusterSvc->waitReceiveMessage();
    auto request_time = Clock::now() - start_time;
    if (msg.messageType == ClusterMessageType::EmergencyStop) {
      // Emergency stop, return FAILURE
      ATH_MSG_ERROR("Received EmergencyStop message!");
      return StatusCode::FAILURE;
    }

    if (msg.messageType == ClusterMessageType::EventsDone) {
      auto loop_time = Clock::now() - start;
      ATH_MSG_INFO("Worker " << m_clusterSvc->rank() << " DONE. Loop took "
                             << std::chrono::hh_mm_ss(loop_time)
                             << " to process " << m_nLocalCreatedEvts
                             << " events.");
      // Been told we've reached the end.
      // Provide status to master
      ClusterMessage::WorkerStatus status{};  // [reconstructed]
      // At end of stream, we need to *fully* drain the scheduler
      StatusCode sc = StatusCode::SUCCESS;
      std::size_t numSlots = m_whiteboard->getNumberOfStores();
      while (sc.isSuccess() && m_schedulerSvc->freeSlots() < numSlots) {
        sc = drainLocalScheduler();  // [reconstructed]
      }
      status.status = sc;
      status.createdEvents = m_nLocalCreatedEvts;
      status.skippedEvents = m_nLocalSkippedEvts;
      status.finishedEvents = m_nLocalFinishedEvts;
      m_clusterSvc->sendMessage(
          0, ClusterMessage{.messageType = ClusterMessageType::FinalWorkerStatus,
                            .payload = status});  // [reconstructed]
      return sc;
    }
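
    // The full-drain criterion above relies on the fact that the scheduler is
    // empty exactly when every whiteboard store is a free slot again.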

    // Any message other than ProvideEvent would now be an error
    if (msg.messageType != ClusterMessageType::ProvideEvent ||
        msg.source != 0) {
      ATH_MSG_ERROR("Received unexpected message "
                    << std::format("{}", msg.messageType) << " from "
                    << msg.source);
      return StatusCode::FAILURE;
    }

    int evt = get<int>(msg.payload);
    ATH_MSG_INFO("Starting event " << evt);
    StatusCode sc = insertEvent(  // [reconstructed]
        evt, end_of_stream,
        std::chrono::duration_cast<std::chrono::nanoseconds>(request_time)
            .count());
    if (sc.isFailure() && !sc.isRecoverable()) {
      ClusterMessage::WorkerStatus status{};  // [reconstructed]
      status.status = sc;
      status.createdEvents = m_nLocalCreatedEvts;
      status.skippedEvents = m_nLocalSkippedEvts;
      status.finishedEvents = m_nLocalFinishedEvts;
      m_clusterSvc->sendMessage(
          0, ClusterMessage{.messageType = ClusterMessageType::WorkerError,
                            .payload = status});  // [reconstructed]
      return sc;
    }
    if (end_of_stream || m_terminateLoop) {
      auto loop_time = Clock::now() - start;
      ATH_MSG_INFO("Worker " << m_clusterSvc->rank() << " DONE. Loop took "
                             << std::chrono::hh_mm_ss(loop_time)
                             << " to process " << m_nLocalCreatedEvts
                             << " events.");
      // reached end of stream, drain scheduler
      ClusterMessage::WorkerStatus status{};  // [reconstructed]
      // At end of stream, we need to *fully* drain the scheduler
      StatusCode sc = StatusCode::SUCCESS;
      std::size_t numSlots = m_whiteboard->getNumberOfStores();
      while (sc.isSuccess() && m_schedulerSvc->freeSlots() < numSlots) {
        sc = drainLocalScheduler();  // [reconstructed]
      }
      status.status = sc;
      status.createdEvents = m_nLocalCreatedEvts;
      status.skippedEvents = m_nLocalSkippedEvts;
      status.finishedEvents = m_nLocalFinishedEvts;
      m_clusterSvc->sendMessage(
          0, ClusterMessage{.messageType = ClusterMessageType::FinalWorkerStatus,
                            .payload = status});  // [reconstructed]
      return sc;
    }
  }
}
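
// Insert one master-assigned event into the local scheduler: seek the event
// selector to the given index, create and execute an event context, and log
// the time this worker spent waiting for the master's reply (requestTime_ns).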

StatusCode MPIHiveEventLoopMgr::insertEvent(int eventIdx, bool& endOfStream,
                                            std::int64_t requestTime_ns) {
  // fast-forward to event
  // Create the event context now so next writes into the next slot when
  // skipping, not the one that's being used
  endOfStream = false;
  auto ctx = createEventContext();
  Gaudi::Hive::setCurrentContext(ctx);
  if (!ctx.valid()) {
    endOfStream = true;  // BUG: Doesn't actually mean end of stream. Remove
                         // after making sure!
    return StatusCode::FAILURE;
  }

  ATH_CHECK(seek(eventIdx));
  // execute event
  StatusCode sc = executeEvent(std::move(ctx));
  const auto evtID = m_lastEventContext.eventID();
  m_clusterSvc->log_addEvent(eventIdx, evtID.run_number(), evtID.event_number(),
                             requestTime_ns);

  if (sc.isRecoverable()) {
    ++m_nLocalSkippedEvts;  // [reconstructed]
  } else if (sc.isSuccess()) {
    ++m_nLocalCreatedEvts;  // [reconstructed]
  }
  return sc;
}
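
// Drain the local scheduler: block until at least one finished event is
// available, opportunistically pop any further finished events, fire an
// EndProcessing incident for each, and clear the corresponding whiteboard
// slots.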

StatusCode MPIHiveEventLoopMgr::drainLocalScheduler() {

  StatusCode sc(StatusCode::SUCCESS);

  // maybe we can do better
  std::vector<std::unique_ptr<EventContext>> finishedEvtContexts;

  EventContext* finishedEvtContext(nullptr);

  // Here we wait so as not to waste CPU resources
  ATH_MSG_DEBUG("drainLocalScheduler: [" << m_nLocalFinishedEvts
                                         << "] Waiting for a context");
  sc = m_schedulerSvc->popFinishedEvent(finishedEvtContext);

  // We got past it: cache the pointer
  if (sc.isSuccess()) {
    ATH_MSG_DEBUG("drainLocalScheduler: scheduler not empty: Context "
                  << finishedEvtContext);
    finishedEvtContexts.emplace_back(finishedEvtContext);
  } else {
    // no more events left in scheduler to be drained
    ATH_MSG_DEBUG("drainLocalScheduler: scheduler empty");
    return StatusCode::SUCCESS;
  }

  // Let's see if we can pop other event contexts
  while (m_schedulerSvc->tryPopFinishedEvent(finishedEvtContext).isSuccess()) {
    finishedEvtContexts.emplace_back(finishedEvtContext);
  }

  // Now we flush them
  StatusCode fail(StatusCode::SUCCESS);
  for (auto& thisFinishedEvtContext : finishedEvtContexts) {
    if (!thisFinishedEvtContext) {
      ATH_MSG_FATAL("Detected nullptr ctxt while clearing WB!");
      fail = StatusCode::FAILURE;
      continue;
    }

    // Update event log
    m_clusterSvc->log_completeEvent(
        thisFinishedEvtContext->eventID().run_number(),
        thisFinishedEvtContext->eventID().event_number(),
        m_aess->eventStatus(*thisFinishedEvtContext));

    if (m_aess->eventStatus(*thisFinishedEvtContext) != EventStatus::Success) {
      ATH_MSG_FATAL("Failed event detected on "
                    << thisFinishedEvtContext << " w/ fail mode: "
                    << m_aess->eventStatus(*thisFinishedEvtContext));
      thisFinishedEvtContext.reset();
      fail = StatusCode::FAILURE;
      continue;
    }

    EventID::number_type n_run(0);
    EventID::event_number_t n_evt(0);

    if (m_whiteboard->selectStore(thisFinishedEvtContext->slot()).isSuccess()) {
      n_run = thisFinishedEvtContext->eventID().run_number();
      n_evt = thisFinishedEvtContext->eventID().event_number();
    } else {
      ATH_MSG_ERROR("DrainSched: unable to select store "
                    << thisFinishedEvtContext->slot());
      thisFinishedEvtContext.reset();
      fail = StatusCode::FAILURE;
      continue;
    }

    // Some code still needs global context in addition to that passed in the
    // incident
    Gaudi::Hive::setCurrentContext(*thisFinishedEvtContext);
    m_incidentSvc->fireIncident(
        Incident(name(), IncidentType::EndProcessing, *thisFinishedEvtContext));

    ATH_MSG_DEBUG("Clearing slot "
                  << thisFinishedEvtContext->slot() << " (event "
                  << thisFinishedEvtContext->evt() << ") of the whiteboard");

    StatusCode sc = clearWBSlot(thisFinishedEvtContext->slot());
    if (!sc.isSuccess()) {
      ATH_MSG_ERROR("Whiteboard slot " << thisFinishedEvtContext->slot()
                                       << " could not be properly cleared");
      if (fail != StatusCode::FAILURE) {
        fail = sc;
      }
      thisFinishedEvtContext.reset();
      continue;
    }

    ++m_nLocalFinishedEvts;  // [reconstructed]

    writeHistograms().ignore();
    ++m_proc;

    if (m_doEvtHeartbeat) {
      if (!m_useTools) {
        ATH_MSG_INFO(" ===>>> done processing event #"
                     << n_evt << ", run #" << n_run << " on slot "
                     << thisFinishedEvtContext->slot() << ", " << m_proc
                     << " events processed so far <<<===");
      } else {
        ATH_MSG_INFO(" ===>>> done processing event #"
                     << n_evt << ", run #" << n_run << " on slot "
                     << thisFinishedEvtContext->slot() << ", " << m_nev
                     << " events read and " << m_proc
                     << " events processed so far <<<===");
      }
      std::ofstream outfile("eventLoopHeartBeat.txt");
      if (!outfile) {
        ATH_MSG_ERROR(" unable to open: eventLoopHeartBeat.txt");
        fail = StatusCode::FAILURE;
        thisFinishedEvtContext.reset();
        continue;
      }
      outfile << " done processing event #" << n_evt << ", run #" << n_run
              << " " << m_nev << " events read so far <<<===" << std::endl;
      outfile.close();
    }

    ATH_MSG_DEBUG("drainLocalScheduler thisFinishedEvtContext: "
                  << thisFinishedEvtContext);

    thisFinishedEvtContext.reset();
  }

  return fail;
}

StoreGateSvc* MPIHiveEventLoopMgr::eventStore() const {
  return m_eventStore.get();
}