3#ifndef ATHEXONNXRUNTIME_EVALUATEMODEL_H
4#define ATHEXONNXRUNTIME_EVALUATEMODEL_H
11#include "GaudiKernel/ServiceHandle.h"
14#include <onnxruntime_cxx_api.h>
34 using AthReentrantAlgorithm::AthReentrantAlgorithm;
42 virtual StatusCode
execute(
const EventContext& ctx )
const override;
52 "dev/MLTest/2020-03-31/t10k-images-idx3-ubyte",
53 "Name of the input pixel file to load" };
56 Gaudi::Property<int>
m_batchSize {
this,
"BatchSize", 1,
"No. of elements/example in a batch"};
60 this,
"ORTInferenceTool",
"AthOnnx::OnnxRuntimeInferenceTool"
Algorithm demonstrating the usage of the ONNX Runtime C++ API.
virtual StatusCode execute(const EventContext &ctx) const override
Function executing the algorithm for a single event.
Gaudi::Property< std::string > m_pixelFileName
Name of the input pixel file to load.
virtual StatusCode initialize() override
Function initialising the algorithm.
ToolHandle< IOnnxRuntimeInferenceTool > m_onnxTool
Tool handler for onnx inference session.
Gaudi::Property< int > m_batchSize
The following properties need to be considered if the .onnx model is evaluated in batch mode.
std::vector< std::vector< std::vector< float > > > m_input_tensor_values_notFlat
An algorithm that can be simultaneously executed in multiple threads.
Namespace holding all of the Onnx Runtime example code.