ExampleMLInferenceWithTriton.cxx
// Copyright (C) 2002-2025 CERN for the benefit of the ATLAS collaboration

// Local include(s).
#include "ExampleMLInferenceWithTriton.h"

// Framework include(s).
#include "PathResolver/PathResolver.h"
#include <arpa/inet.h> // ntohl
#include <utility>     // std::pair
#include <fstream>
#include <iterator>    // std::ssize

namespace AthInfer {

StatusCode ExampleMLInferenceWithTriton::initialize() {
  // Fetch tools
  ATH_CHECK( m_tritonTool.retrieve() );

  // The MNIST test file holds 10000 images; refuse larger batch sizes.
  if (m_batchSize > 10000) {
    ATH_MSG_ERROR("The requested batch size (" << m_batchSize
                  << ") exceeds the number of available samples (10000)");
    return StatusCode::FAILURE;
  }
  // Read the input file with the test images.
  std::string pixelFilePath = PathResolver::find_calib_file( m_pixelFileName );
  ATH_MSG_INFO( "Using pixel file: " << pixelFilePath );

  m_input_tensor_values_notFlat = read_mnist_pixel_notFlat( pixelFilePath );
  ATH_MSG_INFO( "Total no. of samples: " << m_input_tensor_values_notFlat.size() );

  return StatusCode::SUCCESS;
}

StatusCode ExampleMLInferenceWithTriton::execute( [[maybe_unused]] const EventContext& ctx ) const {

  // Prepare the input: flatten each 28x28 image into one contiguous,
  // batched input vector.
  std::vector<float> inputDataVector;
  inputDataVector.reserve(m_input_tensor_values_notFlat.size() * 28 * 28);
  for (const std::vector<std::vector<float> >& imageData : m_input_tensor_values_notFlat) {
    for (const auto& row : imageData) {
      inputDataVector.insert(inputDataVector.end(), row.begin(), row.end());
    }
  }
  // Input shape expected by the model: [batch, rows, cols].
  std::vector<int64_t> inputShape = {m_batchSize, 28, 28};

  AthInfer::InputDataMap inputData;
  inputData["flatten_input:0"] = std::make_pair(
    inputShape, std::move(inputDataVector)
  );

  // The output tensor holds ten class scores per image in the batch.
  AthInfer::OutputDataMap outputData;
  outputData["dense_1/Softmax:0"] = std::make_pair(
    std::vector<int64_t>{m_batchSize, 10}, std::vector<float>{}
  );

  ATH_CHECK(m_tritonTool->inference(inputData, outputData));

  auto& outputScores = std::get<std::vector<float>>(outputData["dense_1/Softmax:0"].second);
  auto inRange = [&outputScores](int idx) -> bool { return (idx >= 0) and (idx < std::ssize(outputScores)); };
  ATH_MSG_DEBUG("Label for the input test data: ");
  for (int ibatch = 0; ibatch < m_batchSize; ibatch++) {
    float max = -999;
    int max_index{-1};
    for (int i = 0; i < 10; i++) {
      const int index = i + ibatch * 10;
      if (not inRange(index)) continue;
      ATH_MSG_DEBUG("Score for class " << i << " = " << outputScores[index] << " in batch " << ibatch);
      if (max < outputScores[index]) {
        max = outputScores[index];
        max_index = index;
      }
    }
    if (not inRange(max_index)) {
      ATH_MSG_ERROR("No maximum found in ExampleMLInferenceWithTriton::execute");
      return StatusCode::FAILURE;
    }
    // Report the class number within the batch (0-9), not the raw index into
    // the flattened score vector.
    ATH_MSG_DEBUG("Class: " << max_index - ibatch * 10 << " has the highest score: "
                  << outputScores[max_index] << " in batch " << ibatch);
  }

  return StatusCode::SUCCESS;
}

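// The helper below reads MNIST images in the IDX binary format: a 16-byte
// header of four big-endian 32-bit integers (magic number, image count,
// rows, columns), followed by one unsigned byte per pixel in row-major order.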
std::vector<std::vector<std::vector<float>>>
ExampleMLInferenceWithTriton::read_mnist_pixel_notFlat( const std::string& full_path ) const
{
  std::ifstream file(full_path.c_str(), std::ios::binary);
  if (not file.is_open()) {
    // Return an empty result if the file could not be opened.
    return {};
  }
  // Read the header fields and convert them from big-endian to host order.
  int magic_number = 0;
  int number_of_images = 0;
  int n_rows = 0;
  int n_cols = 0;
  file.read(reinterpret_cast<char*>(&magic_number), sizeof(magic_number));
  magic_number = ntohl(magic_number);
  file.read(reinterpret_cast<char*>(&number_of_images), sizeof(number_of_images));
  number_of_images = ntohl(number_of_images);
  file.read(reinterpret_cast<char*>(&n_rows), sizeof(n_rows));
  n_rows = ntohl(n_rows);
  file.read(reinterpret_cast<char*>(&n_cols), sizeof(n_cols));
  n_cols = ntohl(n_cols);
  // Size the output from the header instead of assuming 10000 28x28 images,
  // so a differently sized file cannot cause out-of-range writes.
  std::vector<std::vector<std::vector<float>>> input_tensor_values(
    number_of_images,
    std::vector<std::vector<float> >(n_rows, std::vector<float>(n_cols)));
  // Read the pixel payload and normalise each byte to [0, 1].
  for (int i = 0; i < number_of_images; ++i) {
    for (int r = 0; r < n_rows; ++r) {
      for (int c = 0; c < n_cols; ++c) {
        unsigned char temp = 0;
        file.read(reinterpret_cast<char*>(&temp), sizeof(temp));
        input_tensor_values[i][r][c] = float(temp) / 255;
      }
    }
  }
  return input_tensor_values;
}

} // namespace AthInfer
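For reference, below is a minimal sketch of the declarations this file relies on, as they might appear in the accompanying ExampleMLInferenceWithTriton.h. The member signatures match their uses above; the base class, include paths, and property names/defaults are assumptions, not the actual header.

// A minimal header sketch, not the real ExampleMLInferenceWithTriton.h.
// Assumptions: the AthReentrantAlgorithm base class (suggested by the const
// execute(ctx) signature), the include paths, and the property defaults.
#include "AthenaBaseComps/AthReentrantAlgorithm.h" // assumed base class
#include "AthOnnxInterfaces/IAthInferenceTool.h"   // assumed include path
#include "GaudiKernel/ToolHandle.h"

#include <string>
#include <vector>

namespace AthInfer {

class ExampleMLInferenceWithTriton : public AthReentrantAlgorithm {
public:
  using AthReentrantAlgorithm::AthReentrantAlgorithm;

  /// Function initialising the algorithm.
  virtual StatusCode initialize() override;
  /// Function executing the algorithm for a single event.
  virtual StatusCode execute( const EventContext& ctx ) const override;

private:
  /// Helper reading the MNIST pixel file into [image][row][column] form.
  std::vector<std::vector<std::vector<float>>>
  read_mnist_pixel_notFlat( const std::string& full_path ) const;

  /// Name of the pixel data file to load.
  Gaudi::Property<std::string> m_pixelFileName{
    this, "PixelFileName", "", "Name of the pixel data file to load"};
  /// Batch size to use when the model is evaluated in batch mode.
  Gaudi::Property<int> m_batchSize{this, "BatchSize", 1, "Batch size"};
  /// Tool handle for the Triton client.
  ToolHandle<AthInfer::IAthInferenceTool> m_tritonTool{
    this, "TritonTool", "", "Tool handle for the Triton client"};
  /// The MNIST test images, kept in their unflattened form.
  std::vector<std::vector<std::vector<float>>> m_input_tensor_values_notFlat;
};

} // namespace AthInfer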