m_env = std::make_unique<Ort::Env>(ORT_LOGGING_LEVEL_WARNING, "");

Ort::SessionOptions session_options;
session_options.SetIntraOpNumThreads(1);
session_options.SetGraphOptimizationLevel(
    GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
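
// Assumed (the construction itself falls outside this excerpt): the session
// would be created from the environment and options above, e.g.
//   m_session = std::make_unique<Ort::Session>(*m_env, modelPath.c_str(),
//                                              session_options);
// where modelPath is a hypothetical name for the model-file argument.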
Ort::AllocatorWithDefaultOptions allocator;

// Collect the name and shape of every input node.
size_t numInputNodes = m_session->GetInputCount();
for (size_t i = 0; i < numInputNodes; i++) {
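  // Assumed from the use of name.get() below (the fetch itself falls
  // outside this excerpt): get the managed name string of input node i.
  auto name = m_session->GetInputNameAllocated(i, allocator);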
  // Copy the node name into a buffer that outlives the allocated string.
  char* input_name = new char[strlen(name.get()) + 1];
  strcpy(input_name, name.get());
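  // Assumed (the storage lines fall outside this excerpt): keep the copied
  // name for the Run() calls below.
  m_inputNodeNames.push_back(input_name);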
  // ...
  Ort::TypeInfo inputTypeInfo = m_session->GetInputTypeInfo(i);
  auto tensorInfo = inputTypeInfo.GetTensorTypeAndShapeInfo();
  // ...
}

// Collect the name and shape of every output node.
size_t numOutputNodes = m_session->GetOutputCount();
for (size_t i = 0; i < numOutputNodes; i++) {
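  // Assumed, mirroring the input loop: fetch the managed output-name string.
  auto name = m_session->GetOutputNameAllocated(i, allocator);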
  char* output_name = new char[strlen(name.get()) + 1];
  strcpy(output_name, name.get());
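  // Assumed (the storage lines fall outside this excerpt): keep the copied
  // name for the Run() calls below.
  m_outputNodeNames.push_back(output_name);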
  // ...
  Ort::TypeInfo outputTypeInfo = m_session->GetOutputTypeInfo(i);
  auto tensorInfo = outputTypeInfo.GetTensorTypeAndShapeInfo();
  // ...
}

// ...
std::vector<std::vector<float>> output;
if (inputTensorValues.size() == 0) {
  return output;
}
// Pack the nested input vectors into a single batch matrix.
NetworkBatchInput vectorInput(inputTensorValues.size(),
                              inputTensorValues[0].size());
for (size_t i = 0; i < inputTensorValues.size(); i++) {
  for (size_t j = 0; j < inputTensorValues[i].size(); j++) {
    vectorInput(i, j) = inputTensorValues[i][j];
  }
}
// ...

// Single-sample overload: fill a one-row batch.
for (size_t i = 0; i < inputTensorValues.size(); i++) {
  vectorInput(0, i) = inputTensorValues[i];
}
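// Assumed (the call falls outside this excerpt): run the batched overload
// on the one-row input and return its only row.
auto vectorOutput = runONNXInference(vectorInput);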
return vectorOutput[0];

// ...
int batchSize = inputTensorValues.rows();
// ...
// A leading dimension of -1 marks a dynamic batch axis; pin it to the
// actual batch size.
if (inputNodeDims[0] == -1) {
  inputNodeDims[0] = batchSize;
}
if (outputNodeDims[0] == -1) {
  outputNodeDims[0] = batchSize;
}
// Accept either a flat (rank-2) or structured (rank-3) model input: the
// column count must match dims[1] or dims[1] * dims[2] (this check assumes
// a rank-3 shape when it indexes dims[2]).
if (inputNodeDims[1] * inputNodeDims[2] != inputTensorValues.cols() &&
    inputNodeDims[1] != inputTensorValues.cols()) {
  throw std::runtime_error(
      "runONNXInference: feature size doesn't match the input size: inputSize required: " +
      std::to_string(inputNodeDims[1] * inputNodeDims[2]) +
      " inputSize provided: " + std::to_string(inputTensorValues.cols()));
}
if (batchSize != 1 &&
    (inputNodeDims[0] != batchSize || outputNodeDims[0] != batchSize)) {
  throw std::runtime_error(
      "runONNXInference: batch size doesn't match the input or output node size");
}

Ort::MemoryInfo memoryInfo =
    Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
Ort::Value inputTensor = Ort::Value::CreateTensor<float>(
    memoryInfo, inputTensorValues.data(), inputTensorValues.size(),
    inputNodeDims.data(), inputNodeDims.size());
if (!inputTensor.IsTensor()) {
  throw std::runtime_error(
      "runONNXInference: conversion of input to Tensor failed.");
}

Ort::RunOptions run_options;
std::vector<Ort::Value> outputTensors =
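    // Assumed from context (the Run() call itself falls outside this
    // excerpt): evaluate the session on the cached input and output names.
    m_session->Run(run_options, m_inputNodeNames.data(), &inputTensor,
                   m_inputNodeNames.size(), m_outputNodeNames.data(),
                   m_outputNodeNames.size());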
if (!outputTensors[0].IsTensor() ||
    (outputTensors.size() != m_outputNodeNames.size())) {
  throw std::runtime_error(
      "runONNXInference: calculation of output failed.");
}

float* outputTensor = outputTensors.front().GetTensorMutableData<float>();
// Copy the flat output buffer into per-sample vectors. A rank-1 output
// carries a single value per sample, so size and index with nOutputs
// rather than outputNodeDims[1] to avoid reading past a rank-1 shape.
const int nOutputs =
    (outputNodeDims.size() > 1) ? static_cast<int>(outputNodeDims[1]) : 1;
std::vector<std::vector<float>> outputTensorValues(
    batchSize, std::vector<float>(nOutputs, -1));
for (int i = 0; i < outputNodeDims[0]; i++) {
  for (int j = 0; j < nOutputs; j++) {
    outputTensorValues[i][j] = outputTensor[i * nOutputs + j];
  }
}
// ...
return outputTensorValues;

// ...
const int batchSize = inputTensorValues.rows();
// ...
// Pin dynamic batch axes to the actual batch size, as above.
if (inputNodeDims[0] == -1) {
  inputNodeDims[0] = batchSize;
}
if (outputNodeDims[0] == -1) {
  outputNodeDims[0] = batchSize;
}
if (inputNodeDims[1] != inputTensorValues.cols()) {
  throw std::runtime_error(
      "runONNXInference: feature size doesn't match the input size: inputSize required: " +
      std::to_string(inputNodeDims[1]) +
      " inputSize provided: " + std::to_string(inputTensorValues.cols()));
}
if (batchSize != 1 &&
    (inputNodeDims[0] != batchSize || outputNodeDims[0] != batchSize)) {
  throw std::runtime_error(
      "runONNXInference: batch size doesn't match the input or output node size");
}

Ort::MemoryInfo memoryInfo =
    Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
Ort::Value inputTensor = Ort::Value::CreateTensor<float>(
    memoryInfo, inputTensorValues.data(), inputTensorValues.size(),
    inputNodeDims.data(), inputNodeDims.size());
if (!inputTensor.IsTensor()) {
  throw std::runtime_error(
      "runONNXInference: conversion of input to Tensor failed.");
}

Ort::RunOptions run_options;
std::vector<Ort::Value> outputTensors =
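    // Assumed from context, as in the single-output variant above: the
    // elided call evaluates the session on the cached node names.
    m_session->Run(run_options, m_inputNodeNames.data(), &inputTensor,
                   m_inputNodeNames.size(), m_outputNodeNames.data(),
                   m_outputNodeNames.size());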
if (!outputTensors[0].IsTensor() ||
    (outputTensors.size() != m_outputNodeNames.size())) {
  throw std::runtime_error(
      "runONNXInference: calculation of output failed.");
}

// Convert each output node into a (batchSize x nNodes) Eigen matrix, keyed
// by the node index.
std::map<int, Eigen::MatrixXf> outputTensorMap;
size_t numOutputNodes = m_session->GetOutputCount();
for (size_t i = 0; i < numOutputNodes; i++) {
  float* output = outputTensors.at(i).GetTensorMutableData<float>();
  Ort::TypeInfo outputTypeInfo = m_session->GetOutputTypeInfo(i);
  auto outputTensorInfo = outputTypeInfo.GetTensorTypeAndShapeInfo();
  // ...
  outputNodeDims = outputTensorInfo.GetShape();
  // A rank-1 output carries a single value per sample.
  int nNodes = outputNodeDims.size() > 1 ? outputNodeDims[1] : 1;
  Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic> batchMatrix(batchSize,
                                                                   nNodes);
  for (int j = 0; j < batchSize; j++) {
    Eigen::VectorXf vec(nNodes);
    for (int k = 0; k < nNodes; k++) {
      // Index with nNodes so rank-1 outputs do not read out of bounds.
      float val = output[j * nNodes + k];
      vec(k) = val;
    }
    batchMatrix.row(j) = vec.transpose();
  }
  outputTensorMap[i] = std::move(batchMatrix);
}
return outputTensorMap;
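
// Hypothetical usage sketch (caller side, not part of this file): pack a
// batch into the NetworkBatchInput matrix and run the batched overload; the
// multi-output variant above returns one Eigen matrix per output node.
//
//   NetworkBatchInput batch(nSamples, nFeatures);  // assumed dimensions
//   // ... fill batch row by row ...
//   std::vector<std::vector<float>> scores = runONNXInference(batch);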