2#ifndef AthOnnx_IOnnxRuntimeInferenceTool_H
3#define AthOnnx_IOnnxRuntimeInferenceTool_H
11#include <onnxruntime_cxx_api.h>
66 virtual int64_t
getBatchSize(int64_t dataSize,
int idx = 0)
const = 0;
77 StatusCode
addInput(std::vector<Ort::Value>& inputTensors, std::vector<T>&
data,
unsigned idx = 0, int64_t batchSize = -1)
const;
88 StatusCode
addOutput(std::vector<Ort::Value>& outputTensors, std::vector<T>&
data,
unsigned idx = 0, int64_t batchSize = -1)
const;
97 virtual StatusCode
inference(std::vector<Ort::Value>& inputTensors, std::vector<Ort::Value>& outputTensors)
const = 0;
108 template <
typename T>
109 Ort::Value
createTensor(std::vector<T>&
data,
const std::vector<int64_t>& dataShape, int64_t batchSize)
const;
// NOTE(review): this line appears to be extraction residue from an unrelated
// HepEvt header — `hepevt_bytes_allocation_ATLAS` is not declared anywhere in
// this file, the declaration is missing its trailing ';', and a raw char
// buffer has no role in this pure interface. Confirm and remove.
char data[hepevt_bytes_allocation_ATLAS]
/// @brief Namespace holding all of the ONNX Runtime example code.