ATLAS Offline Software — generated documentation page for EvaluateModelWithAthInfer.cxx.
Go to the documentation of this file.
1// Copyright (C) 2002-2025 CERN for the benefit of the ATLAS collaboration
2
3// Local include(s).
5
6// Framework include(s).
8#include "EvaluateUtils.h"
10
11namespace AthOnnx {
12
14 // Fetch tools
15 ATH_CHECK( m_onnxTool.retrieve() );
16
17 if(m_batchSize > 10000){
18 ATH_MSG_INFO("The total no. of sample crossed the no. of available sample ....");
19 return StatusCode::FAILURE;
20 }
21 // read input file, and the target file for comparison.
22 std::string pixelFilePath = PathResolver::find_calib_file(m_pixelFileName.value());
23 ATH_MSG_INFO( "Using pixel file: " << pixelFilePath );
24
26 ATH_MSG_INFO("Total no. of samples: "<<m_input_tensor_values_notFlat.size());
27
28 return StatusCode::SUCCESS;
29}
30
31StatusCode EvaluateModelWithAthInfer::execute( [[maybe_unused]] const EventContext& ctx ) const {
32
33 // prepare inputs
34 std::vector<float> inputDataVector;
35 inputDataVector.reserve(m_input_tensor_values_notFlat.size());
36 for (const std::vector<std::vector<float> >& imageData : m_input_tensor_values_notFlat){
37 std::vector<float> flatten = AthOnnxUtils::flattenNestedVectors(imageData);
38 inputDataVector.insert(inputDataVector.end(), flatten.begin(), flatten.end());
39 }
40 std::vector<int64_t> inputShape = {m_batchSize, 28, 28};
41
42 AthInfer::InputDataMap inputData;
43 inputData["flatten_input:0"] = std::make_pair(
44 inputShape, std::move(inputDataVector)
45 );
46
47 AthInfer::OutputDataMap outputData;
48 outputData["dense_1/Softmax:0"] = std::make_pair(
49 std::vector<int64_t>{m_batchSize, 10}, std::vector<float>{}
50 );
51
52 ATH_CHECK(m_onnxTool->inference(inputData, outputData));
53
54 auto& outputScores = std::get<std::vector<float>>(outputData["dense_1/Softmax:0"].second);
55 auto inRange = [&outputScores](int idx)->bool{return (idx>=0) and (idx<std::ssize(outputScores));};
56 ATH_MSG_DEBUG("Label for the input test data: ");
57 for(int ibatch = 0; ibatch < m_batchSize; ibatch++){
58 float max = -999;
59 int max_index{-1};
60 for (int i = 0; i < 10; i++){
61 ATH_MSG_DEBUG("Score for class "<< i <<" = "<<outputScores[i] << " in batch " << ibatch);
62 int index = i + ibatch * 10;
63 if (not inRange(index)) continue;
64 if (max < outputScores[index]){
65 max = outputScores[index];
66 max_index = index;
67 }
68 }
69 if (not inRange(max_index)){
70 ATH_MSG_ERROR("No maximum found in EvaluateModelWithAthInfer::execute");
71 return StatusCode::FAILURE;
72 }
73 ATH_MSG_DEBUG("Class: "<<max_index<<" has the highest score: "<<outputScores[max_index] << " in batch " << ibatch);
74 }
75
76 return StatusCode::SUCCESS;
77}
78
79} // namespace AthOnnx
#define ATH_CHECK
Evaluate an expression and check for errors.
#define ATH_MSG_ERROR(x)
#define ATH_MSG_INFO(x)
#define ATH_MSG_DEBUG(x)
bool inRange(const double *boundaries, const double value, const double tolerance=0.02)
#define max(a, b)
Definition cfImp.cxx:41
std::vector< std::vector< std::vector< float > > > m_input_tensor_values_notFlat
Gaudi::Property< std::string > m_pixelFileName
Name of the model file to load.
ToolHandle< AthInfer::IAthInferenceTool > m_onnxTool
Tool handler for onnx inference session.
Gaudi::Property< int > m_batchSize
The following properties need to be considered if the .onnx model is evaluated in batch mode.
virtual StatusCode initialize() override
Function initialising the algorithm.
virtual StatusCode execute(const EventContext &ctx) const override
Function executing the algorithm for a single event.
static std::string find_calib_file(const std::string &logical_file_name)
std::map< std::string, InferenceData > OutputDataMap
std::map< std::string, InferenceData > InputDataMap
std::vector< T > flattenNestedVectors(const std::vector< std::vector< T > > &features)
Definition OnnxUtils.h:20
Namespace holding all of the Onnx Runtime example code.
std::vector< std::vector< std::vector< float > > > read_mnist_pixel_notFlat(const std::string &full_path)
Definition index.py:1