    ATH_MSG_INFO("The total number of requested samples exceeds the number of available samples.");
    return StatusCode::FAILURE;
  }
  // ...
  return StatusCode::SUCCESS;
}
StatusCode ExampleMLInferenceWithTriton::execute(const EventContext& /*ctx*/) const {

  // Flatten each nested 28x28 image into one contiguous float buffer
  // that the inference tool can consume.
  std::vector<float> inputDataVector;
  // ... (imageData below is one 28x28 image taken from m_input_tensor_values_notFlat)
  std::vector<float> flatten;
  std::size_t total_size = 0;
  for (const auto& feature : imageData) total_size += feature.size();
  flatten.reserve(total_size);
  for (const auto& feature : imageData)
    for (const auto& elem : feature)
      flatten.push_back(elem);

  inputDataVector.insert(inputDataVector.end(), flatten.begin(), flatten.end());
  // Describe the input: a [batch, 28, 28] tensor fed to the model's
  // "flatten_input:0" node.
  std::vector<int64_t> inputShape = {m_batchSize, 28, 28};

  InputDataMap inputData;
  inputData["flatten_input:0"] = std::make_pair(inputShape, std::move(inputDataVector));

  // Reserve the output: a [batch, 10] tensor of per-class softmax scores
  // from the "dense_1/Softmax:0" node; the vector is filled by the tool.
  OutputDataMap outputData;
  outputData["dense_1/Softmax:0"] = std::make_pair(std::vector<int64_t>{m_batchSize, 10},
                                                   std::vector<float>{});
  // ... (the inference itself runs here through m_tritonTool)

  auto& outputScores = std::get<std::vector<float>>(outputData["dense_1/Softmax:0"].second);
  // std::ssize (C++20) yields a signed size, avoiding a signed/unsigned
  // comparison against idx.
  auto inRange = [&outputScores](int idx) -> bool {
    return (idx >= 0) and (idx < std::ssize(outputScores));
  };
  for (int ibatch = 0; ibatch < m_batchSize; ibatch++) {
    // Find the class with the highest softmax score for this batch entry.
    float max = -1.f;   // softmax scores lie in [0, 1]
    int max_index = -1;
    for (int i = 0; i < 10; i++) {
      int index = i + ibatch * 10;
      ATH_MSG_DEBUG("Score for class " << i << " = " << outputScores[index]
                    << " in batch " << ibatch);
      if (inRange(index) && outputScores[index] > max) {
        max = outputScores[index];
        max_index = index;
      }
    }
    if (!inRange(max_index)) {
      ATH_MSG_ERROR("No maximum found in ExampleMLInferenceWithTriton::execute");
      return StatusCode::FAILURE;
    }
    ATH_MSG_DEBUG("Class: " << max_index << " has the highest score: "
                  << outputScores[max_index] << " in batch " << ibatch);
  }

  return StatusCode::SUCCESS;
}
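// The hand-rolled maximum search above can equivalently be written with
// std::max_element. A minimal sketch (argmaxForBatch is a hypothetical
// helper, not part of this class; needs <algorithm> and <vector>),
// assuming the scores are laid out contiguously as [batch][class]:
static int argmaxForBatch(const std::vector<float>& scores,
                          int ibatch, int nClasses = 10) {
  auto first = scores.begin() + static_cast<std::ptrdiff_t>(ibatch) * nClasses;
  // Distance from the start of this batch entry to its largest score.
  return static_cast<int>(std::max_element(first, first + nClasses) - first);
}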
std::vector<std::vector<std::vector<float>>>
ExampleMLInferenceWithTriton::read_mnist_pixel_notFlat(const std::string& full_path) const
{
  // MNIST test set: 10000 images of 28x28 grey-scale pixels.
  std::vector<std::vector<std::vector<float>>> input_tensor_values;
  input_tensor_values.resize(10000, std::vector<std::vector<float>>(28, std::vector<float>(28)));
  std::ifstream file(full_path.c_str(), std::ios::binary);
  int magic_number = 0;
  int number_of_images = 0;
  int n_rows = 0;
  int n_cols = 0;
  // The four IDX header words are big-endian; convert each to host
  // byte order after reading.
  file.read(reinterpret_cast<char*>(&magic_number), sizeof(magic_number));
  magic_number = ntohl(magic_number);
  file.read(reinterpret_cast<char*>(&number_of_images), sizeof(number_of_images));
  number_of_images = ntohl(number_of_images);
  file.read(reinterpret_cast<char*>(&n_rows), sizeof(n_rows));
  n_rows = ntohl(n_rows);
  file.read(reinterpret_cast<char*>(&n_cols), sizeof(n_cols));
  n_cols = ntohl(n_cols);
  // Read each pixel as an unsigned byte and normalise it to [0, 1].
  for (int i = 0; i < number_of_images; ++i)
    for (int r = 0; r < n_rows; ++r)
      for (int c = 0; c < n_cols; ++c)
      {
        unsigned char temp = 0;
        file.read(reinterpret_cast<char*>(&temp), sizeof(temp));
        input_tensor_values[i][r][c] = float(temp) / 255;
      }

  return input_tensor_values;
}
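// Every header field above is byte-swapped with ntohl() because the MNIST
// files are stored in the big-endian IDX format. A minimal sanity-check
// sketch (hasMnistImageMagic is a hypothetical helper, not part of this
// class; needs <arpa/inet.h>, <cstdint> and <fstream>):
static bool hasMnistImageMagic(std::ifstream& file) {
  std::uint32_t magic = 0;
  file.read(reinterpret_cast<char*>(&magic), sizeof(magic));
  // 0x00000803 (2051) marks an IDX3 file of unsigned-byte images.
  return ntohl(magic) == 0x00000803u;
}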
#define ATH_CHECK
Evaluate an expression and check for errors.
bool inRange(const double *boundaries, const double value, const double tolerance=0.02)
virtual StatusCode execute(const EventContext &ctx) const override
Function executing the algorithm for a single event.
virtual StatusCode initialize() override
Function initialising the algorithm.
Gaudi::Property< int > m_batchSize
The following properties need to be considered if the .onnx model is evaluated in batch mode.
std::vector< std::vector< std::vector< float > > > m_input_tensor_values_notFlat
std::vector< std::vector< std::vector< float > > > read_mnist_pixel_notFlat(const std::string &full_path) const
Gaudi::Property< std::string > m_pixelFileName
Name of the pixel file to load.
ToolHandle< AthInfer::IAthInferenceTool > m_tritonTool
Tool handle for the Triton client.
static std::string find_calib_file(const std::string &logical_file_name)
std::map< std::string, InferenceData > OutputDataMap
std::map< std::string, InferenceData > InputDataMap
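A minimal sketch of how the properties and tool handle listed above are typically declared in the algorithm header (the property string keys and default values here are assumptions, not taken from the original source):

// Declared as data members of the algorithm class:
Gaudi::Property<int> m_batchSize{this, "BatchSize", 1,
    "Number of samples evaluated per inference call"};
Gaudi::Property<std::string> m_pixelFileName{this, "PixelFileName",
    "", "Name of the pixel file to load"};
ToolHandle<AthInfer::IAthInferenceTool> m_tritonTool{this, "TritonTool",
    "", "Tool handle for the Triton client"};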