ATLAS Offline Software
EvaluateModelWithAthInfer.cxx
Go to the documentation of this file.
1 // Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
2 
3 // Local include(s).
5 
6 // Framework include(s).
8 #include "EvaluateUtils.h"
10 
11 namespace AthOnnx {
12 
14  // Fetch tools
15  ATH_CHECK( m_onnxTool.retrieve() );
16 
17  if(m_batchSize > 10000){
18  ATH_MSG_INFO("The total no. of sample crossed the no. of available sample ....");
19  return StatusCode::FAILURE;
20  }
21  // read input file, and the target file for comparison.
22  std::string pixelFilePath = PathResolver::find_file(m_pixelFileName.value(), "CALIBPATH", PathResolver::RecursiveSearch);
23  ATH_MSG_INFO( "Using pixel file: " << pixelFilePath );
24 
26  ATH_MSG_INFO("Total no. of samples: "<<m_input_tensor_values_notFlat.size());
27 
28  return StatusCode::SUCCESS;
29 }
30 
31 StatusCode EvaluateModelWithAthInfer::execute( [[maybe_unused]] const EventContext& ctx ) const {
32 
33  // prepare inputs
34  std::vector<float> inputDataVector;
35  inputDataVector.reserve(m_input_tensor_values_notFlat.size());
36  for (const std::vector<std::vector<float> >& imageData : m_input_tensor_values_notFlat){
37  std::vector<float> flatten = AthOnnxUtils::flattenNestedVectors(imageData);
38  inputDataVector.insert(inputDataVector.end(), flatten.begin(), flatten.end());
39  }
40  std::vector<int64_t> inputShape = {m_batchSize, 28, 28};
41 
42  AthInfer::InputDataMap inputData;
43  inputData["flatten_input"] = std::make_pair(
44  inputShape, std::move(inputDataVector)
45  );
46 
47  AthInfer::OutputDataMap outputData;
48  outputData["dense_1/Softmax"] = std::make_pair(
49  std::vector<int64_t>{m_batchSize, 10}, std::vector<float>{}
50  );
51 
52  ATH_CHECK(m_onnxTool->inference(inputData, outputData));
53 
54  auto& outputScores = std::get<std::vector<float>>(outputData["dense_1/Softmax"].second);
55  auto inRange = [&outputScores](int idx)->bool{return (idx>=0) and (idx<std::ssize(outputScores));};
56  ATH_MSG_DEBUG("Label for the input test data: ");
57  for(int ibatch = 0; ibatch < m_batchSize; ibatch++){
58  float max = -999;
59  int max_index{-1};
60  for (int i = 0; i < 10; i++){
61  ATH_MSG_DEBUG("Score for class "<< i <<" = "<<outputScores[i] << " in batch " << ibatch);
62  int index = i + ibatch * 10;
63  if (not inRange(index)) continue;
64  if (max < outputScores[index]){
65  max = outputScores[index];
66  max_index = index;
67  }
68  }
69  if (not inRange(max_index)){
70  ATH_MSG_ERROR("No maximum found in EvaluateModelWithAthInfer::execute");
71  return StatusCode::FAILURE;
72  }
73  ATH_MSG_DEBUG("Class: "<<max_index<<" has the highest score: "<<outputScores[max_index] << " in batch " << ibatch);
74  }
75 
76  return StatusCode::SUCCESS;
77 }
78 
79 } // namespace AthOnnx
PathResolver::RecursiveSearch
@ RecursiveSearch
Definition: PathResolver.h:28
max
#define max(a, b)
Definition: cfImp.cxx:41
ATH_MSG_INFO
#define ATH_MSG_INFO(x)
Definition: AthMsgStreamMacros.h:31
AthOnnx::EvaluateModelWithAthInfer::m_onnxTool
ToolHandle< AthInfer::IAthInferenceTool > m_onnxTool
Tool handler for onnx inference session.
Definition: EvaluateModelWithAthInfer.h:58
PathResolver::find_file
static std::string find_file(const std::string &logical_file_name, const std::string &search_path, SearchType search_type=LocalSearch)
Definition: PathResolver.cxx:251
index
Definition: index.py:1
AthOnnx::EvaluateModelWithAthInfer::execute
virtual StatusCode execute(const EventContext &ctx) const override
Function executing the algorithm for a single event.
Definition: EvaluateModelWithAthInfer.cxx:31
EvaluateModelWithAthInfer.h
EvaluateUtils.h
AthOnnx::EvaluateModelWithAthInfer::m_batchSize
Gaudi::Property< int > m_batchSize
The following properties need to be considered if the .onnx model is evaluated in batch mode.
Definition: EvaluateModelWithAthInfer.h:55
AthOnnx::EvaluateModelWithAthInfer::initialize
virtual StatusCode initialize() override
Function initialising the algorithm.
Definition: EvaluateModelWithAthInfer.cxx:13
ATH_MSG_ERROR
#define ATH_MSG_ERROR(x)
Definition: AthMsgStreamMacros.h:33
AthOnnx::EvaluateModelWithAthInfer::m_input_tensor_values_notFlat
std::vector< std::vector< std::vector< float > > > m_input_tensor_values_notFlat
Definition: EvaluateModelWithAthInfer.h:62
lumiFormat.i
int i
Definition: lumiFormat.py:85
EL::StatusCode
::StatusCode StatusCode
StatusCode definition for legacy code.
Definition: PhysicsAnalysis/D3PDTools/EventLoop/EventLoop/StatusCode.h:22
ATH_MSG_DEBUG
#define ATH_MSG_DEBUG(x)
Definition: AthMsgStreamMacros.h:29
AthInfer::OutputDataMap
std::map< std::string, InferenceData > OutputDataMap
Definition: IAthInferenceTool.h:17
AthOnnx::EvaluateModelWithAthInfer::m_pixelFileName
Gaudi::Property< std::string > m_pixelFileName
Name of the model file to load.
Definition: EvaluateModelWithAthInfer.h:50
ATH_CHECK
#define ATH_CHECK
Definition: AthCheckMacros.h:40
inRange
bool inRange(const double *boundaries, const double value, const double tolerance=0.02)
Definition: LArSCIdVsIdTest.cxx:5
PathResolver.h
EvaluateUtils::read_mnist_pixel_notFlat
std::vector< std::vector< std::vector< float > > > read_mnist_pixel_notFlat(const std::string &full_path)
Definition: EvaluateUtils.cxx:11
DeMoScan.index
string index
Definition: DeMoScan.py:364
AthOnnxUtils::flattenNestedVectors
std::vector< T > flattenNestedVectors(const std::vector< std::vector< T >> &features)
Definition: OnnxUtils.h:20
LArNewCalib_DelayDump_OFC_Cali.idx
idx
Definition: LArNewCalib_DelayDump_OFC_Cali.py:69
OnnxUtils.h
AthInfer::InputDataMap
std::map< std::string, InferenceData > InputDataMap
Definition: IAthInferenceTool.h:16
AthOnnx
Namespace holding all of the Onnx Runtime example code.
Definition: EvaluateModel.cxx:11