ATLAS Offline Software
EvaluateModel.cxx
Go to the documentation of this file.
1 // Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
2 
3 // Local include(s).
4 #include "EvaluateModel.h"
5 #include <tuple>
6 #include <fstream>
7 #include <chrono>
8 #include <arpa/inet.h>
9 
10 // Framework include(s).
11 #include "AthOnnxUtils/OnnxUtils.h"
12 
13 namespace AthOnnx {
14 
//*******************************************************************
// Read an MNIST image file (IDX3 format) from @c full_path and return
// the pixel data as [image][row][col], with each pixel scaled to [0,1].
//
// Returns an empty vector if the file cannot be opened or the header
// is truncated/invalid. All IDX header fields are big-endian 32-bit
// integers, hence the ntohl() conversions.
std::vector<std::vector<std::vector<float>>> read_mnist_pixel_notFlat(const std::string &full_path) //function to load test images
{
   std::vector<std::vector<std::vector<float>>> input_tensor_values;

   std::ifstream file(full_path, std::ios::binary);
   if (!file.is_open()) {
      return input_tensor_values;  // cannot open: return empty
   }

   // Helper: read one big-endian 32-bit header field.
   auto readHeaderField = [&file]() -> int {
      int value = 0;
      file.read(reinterpret_cast<char*>(&value), sizeof(value));
      return static_cast<int>(ntohl(value));
   };

   readHeaderField();  // magic number (0x00000803 for IDX3 images; not validated)
   const int number_of_images = readHeaderField();
   const int n_rows = readHeaderField();
   const int n_cols = readHeaderField();
   if (!file || number_of_images <= 0 || n_rows <= 0 || n_cols <= 0) {
      return input_tensor_values;  // truncated or corrupt header
   }

   // Size the output from the header instead of assuming 10000x28x28;
   // the original fixed-size buffer was overrun whenever the header
   // declared more images/rows/cols than the hard-coded dimensions.
   input_tensor_values.resize(number_of_images,
      std::vector<std::vector<float>>(n_rows, std::vector<float>(n_cols)));

   for (int i = 0; i < number_of_images; ++i) {
      for (int r = 0; r < n_rows; ++r) {
         for (int c = 0; c < n_cols; ++c) {
            unsigned char pixel = 0;
            file.read(reinterpret_cast<char*>(&pixel), sizeof(pixel));
            // Normalise the raw byte [0,255] to [0,1].
            input_tensor_values[i][r][c] = static_cast<float>(pixel) / 255;
         }
      }
   }
   return input_tensor_values;
}
48 
//********************************************************************************
// Read an MNIST label file (IDX1 format) from @c full_path and return
// one integer label (0-9) per image.
//
// Returns an empty vector if the file cannot be opened or the header
// is truncated/invalid. IDX header fields are big-endian 32-bit
// integers, hence the ntohl() conversions.
std::vector<int> read_mnist_label(const std::string &full_path) //function to load test labels
{
   std::vector<int> output_tensor_values;

   std::ifstream file(full_path, std::ios::binary);
   if (!file.is_open()) {
      return output_tensor_values;  // cannot open: return empty
   }

   int magic_number = 0;
   int number_of_labels = 0;
   file.read(reinterpret_cast<char*>(&magic_number), sizeof(magic_number));
   magic_number = ntohl(magic_number);  // 0x00000801 for IDX1 labels (not validated)
   file.read(reinterpret_cast<char*>(&number_of_labels), sizeof(number_of_labels));
   number_of_labels = ntohl(number_of_labels);
   if (!file || number_of_labels <= 0) {
      return output_tensor_values;  // truncated or corrupt header
   }

   // Size the output from the header rather than assuming exactly
   // 10000 labels; the original fixed-size buffer was overrun whenever
   // the header declared more labels than that.
   output_tensor_values.resize(number_of_labels);
   for (int i = 0; i < number_of_labels; ++i) {
      unsigned char label = 0;
      file.read(reinterpret_cast<char*>(&label), sizeof(label));
      output_tensor_values[i] = static_cast<int>(label);
   }
   return output_tensor_values;
}
69 
71  // Fetch tools
72  ATH_CHECK( m_onnxTool.retrieve() );
73  m_onnxTool->printModelInfo();
74 
75  /*****
76  The combination of no. of batches and batch size shouldn't cross
 77  the total sample size, which is 10000 for this example
78  *****/
79  if(m_batchSize > 10000){
80  ATH_MSG_INFO("The total no. of sample crossed the no. of available sample ....");
81  return StatusCode::FAILURE;
82  }
83  // read input file, and the target file for comparison.
84  ATH_MSG_INFO( "Using pixel file: " << m_pixelFileName.value() );
85 
87  ATH_MSG_INFO("Total no. of samples: "<<m_input_tensor_values_notFlat.size());
88 
89  return StatusCode::SUCCESS;
90 }
91 
92  StatusCode EvaluateModel::execute( const EventContext& /*ctx*/ ) const {
93 
94  // prepare inputs
95  std::vector<float> inputData;
96  for (int ibatch = 0; ibatch < m_batchSize; ibatch++){
97  const std::vector<std::vector<float> >& imageData = m_input_tensor_values_notFlat[ibatch];
98  std::vector<float> flatten = AthOnnx::flattenNestedVectors(imageData);
99  inputData.insert(inputData.end(), flatten.begin(), flatten.end());
100  }
101 
102  int64_t batchSize = m_onnxTool->getBatchSize(inputData.size());
103  ATH_MSG_INFO("Batch size is " << batchSize << ".");
104  assert(batchSize == m_batchSize);
105 
106  // bind the input data to the input tensor
107  std::vector<Ort::Value> inputTensors;
108  ATH_CHECK( m_onnxTool->addInput(inputTensors, inputData, 0, batchSize) );
109 
110  // reserve space for output data and bind it to the output tensor
111  std::vector<float> outputScores;
112  std::vector<Ort::Value> outputTensors;
113  ATH_CHECK( m_onnxTool->addOutput(outputTensors, outputScores, 0, batchSize) );
114 
115  // run the inference
116  // the output will be filled to the outputScores.
117  ATH_CHECK( m_onnxTool->inference(inputTensors, outputTensors) );
118 
119  ATH_MSG_INFO("Label for the input test data: ");
120  for(int ibatch = 0; ibatch < m_batchSize; ibatch++){
121  float max = -999;
122  int max_index;
123  for (int i = 0; i < 10; i++){
124  ATH_MSG_DEBUG("Score for class "<< i <<" = "<<outputScores[i] << " in batch " << ibatch);
125  int index = i + ibatch * 10;
126  if (max < outputScores[index]){
127  max = outputScores[index];
128  max_index = index;
129  }
130  }
131  ATH_MSG_INFO("Class: "<<max_index<<" has the highest score: "<<outputScores[max_index] << " in batch " << ibatch);
132  }
133 
134  return StatusCode::SUCCESS;
135  }
137 
138  return StatusCode::SUCCESS;
139  }
140 
141 } // namespace AthOnnx
AthOnnx::flattenNestedVectors
std::vector< T > flattenNestedVectors(const std::vector< std::vector< T >> &features)
Definition: OnnxUtils.h:20
beamspotman.r
def r
Definition: beamspotman.py:676
AthOnnx::EvaluateModel::initialize
virtual StatusCode initialize() override
Function initialising the algorithm.
Definition: EvaluateModel.cxx:70
max
#define max(a, b)
Definition: cfImp.cxx:41
ATH_MSG_INFO
#define ATH_MSG_INFO(x)
Definition: AthMsgStreamMacros.h:31
CaloCellPos2Ntuple.int
int
Definition: CaloCellPos2Ntuple.py:24
index
Definition: index.py:1
AthOnnx::read_mnist_pixel_notFlat
std::vector< std::vector< std::vector< float > > > read_mnist_pixel_notFlat(const std::string &full_path)
Definition: EvaluateModel.cxx:17
AthOnnx::EvaluateModel::finalize
virtual StatusCode finalize() override
Function finalising the algorithm.
Definition: EvaluateModel.cxx:136
lumiFormat.i
int i
Definition: lumiFormat.py:92
EL::StatusCode
::StatusCode StatusCode
StatusCode definition for legacy code.
Definition: PhysicsAnalysis/D3PDTools/EventLoop/EventLoop/StatusCode.h:22
ATH_MSG_DEBUG
#define ATH_MSG_DEBUG(x)
Definition: AthMsgStreamMacros.h:29
AthOnnx::EvaluateModel::execute
virtual StatusCode execute(const EventContext &ctx) const override
Function executing the algorithm for a single event.
Definition: EvaluateModel.cxx:92
file
TFile * file
Definition: tile_monitor.h:29
ATH_CHECK
#define ATH_CHECK
Definition: AthCheckMacros.h:40
AthOnnx::EvaluateModel::m_onnxTool
ToolHandle< IOnnxRuntimeInferenceTool > m_onnxTool
Tool handler for onnx inference session.
Definition: EvaluateModel.h:61
AthOnnx::read_mnist_label
std::vector< int > read_mnist_label(const std::string &full_path)
Definition: EvaluateModel.cxx:51
DeMoScan.index
string index
Definition: DeMoScan.py:362
AthOnnx::EvaluateModel::m_batchSize
Gaudi::Property< int > m_batchSize
Following properties needed to be consdered if the .onnx model is evaluated in batch mode.
Definition: EvaluateModel.h:58
OnnxUtils.h
AthOnnx::EvaluateModel::m_pixelFileName
Gaudi::Property< std::string > m_pixelFileName
Name of the model file to load.
Definition: EvaluateModel.h:53
python.compressB64.c
def c
Definition: compressB64.py:93
readCCLHist.float
float
Definition: readCCLHist.py:83
EvaluateModel.h
AthOnnx::EvaluateModel::m_input_tensor_values_notFlat
std::vector< std::vector< std::vector< float > > > m_input_tensor_values_notFlat
Definition: EvaluateModel.h:65
AthOnnx
Namespace holding all of the Onnx Runtime example code.
Definition: EvaluateModel.cxx:13