// Excerpt from an ONNX Runtime inference wrapper. Headers needed by this
// excerpt (not shown in the original): <onnxruntime_cxx_api.h>,
// <Eigen/Dense>, <cstring>, <map>, <memory>, <stdexcept>, <string>, <vector>.
//
// The class name, its members (m_env, m_session, m_inputNodeNames,
// m_outputNodeNames, m_inputNodeDims, m_outputNodeDims) and the
// NetworkBatchInput typedef live in a header that is not part of this
// excerpt. "OnnxRuntimeBase" and the `modelPath` constructor argument are
// assumptions used to keep the excerpt self-contained; NetworkBatchInput is
// assumed to be a row-major, dynamically sized Eigen float matrix (the
// .data() hand-off to ONNX Runtime below requires row-major storage).

OnnxRuntimeBase::OnnxRuntimeBase(const char* modelPath) {
  // Create the runtime environment and configure the session:
  // single-threaded operators, extended graph optimizations.
  m_env = std::make_unique<Ort::Env>(ORT_LOGGING_LEVEL_WARNING, "");

  Ort::SessionOptions session_options;
  session_options.SetIntraOpNumThreads(1);
  session_options.SetGraphOptimizationLevel(
      GraphOptimizationLevel::ORT_ENABLE_EXTENDED);

  // Session creation is elided in the original excerpt but must happen here,
  // before the session is queried below (reconstructed):
  m_session =
      std::make_unique<Ort::Session>(*m_env, modelPath, session_options);

  Ort::AllocatorWithDefaultOptions allocator;

  // Cache the name and shape of every input node.
  size_t numInputNodes = m_session->GetInputCount();
  for (size_t i = 0; i < numInputNodes; i++) {
    // GetInputNameAllocated returns a smart pointer whose buffer dies with
    // it, so the name is copied onto the heap. (The declaration of `name` is
    // elided in the original and reconstructed here.) Note that the excerpt
    // shows no matching delete[] for these copies.
    auto name = m_session->GetInputNameAllocated(i, allocator);
    char* input_name = new char[strlen(name.get()) + 1];
    strcpy(input_name, name.get());
    m_inputNodeNames.push_back(input_name);  // reconstructed

    // All input nodes are assumed to share one shape; the last one wins.
    Ort::TypeInfo inputTypeInfo = m_session->GetInputTypeInfo(i);
    auto tensorInfo = inputTypeInfo.GetTensorTypeAndShapeInfo();
    m_inputNodeDims = tensorInfo.GetShape();  // reconstructed
  }
  // Cache the name and shape of every output node, mirroring the input
  // handling above (the `name` declaration, push_back and shape caching are
  // reconstructed from the elided portions of the original).
  size_t numOutputNodes = m_session->GetOutputCount();
  for (size_t i = 0; i < numOutputNodes; i++) {
    auto name = m_session->GetOutputNameAllocated(i, allocator);
    char* output_name = new char[strlen(name.get()) + 1];
    strcpy(output_name, name.get());
    m_outputNodeNames.push_back(output_name);

    Ort::TypeInfo outputTypeInfo = m_session->GetOutputTypeInfo(i);
    auto tensorInfo = outputTypeInfo.GetTensorTypeAndShapeInfo();
    m_outputNodeDims = tensorInfo.GetShape();
  }
}
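
// A minimal usage sketch for the constructor and the single-entry overload
// below (the class name, as noted above, is an assumption):
//
//   OnnxRuntimeBase model("path/to/model.onnx");
//   std::vector<float> features(nFeatures);  // one entry's input features
//   std::vector<float> scores = model.runONNXInference(features);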

// Single-entry inference: wrap one sample in a 1-row batch and delegate to
// the batched overload, so all shape validation lives in one place. The
// signature and the first/last lines of the body are elided in the original
// and reconstructed here.
std::vector<float> OnnxRuntimeBase::runONNXInference(
    std::vector<float>& inputTensorValues) const {
  NetworkBatchInput vectorInput(1, inputTensorValues.size());
  for (size_t i = 0; i < inputTensorValues.size(); i++) {
    vectorInput(0, i) = inputTensorValues[i];
  }
  auto vectorOutput = runONNXInference(vectorInput);
  return vectorOutput[0];
}
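
// The wrapper above is equivalent to building the 1-row batch by hand, which
// can be useful when the features already live in an Eigen object (a sketch;
// `model` and `features` as in the previous example):
//
//   NetworkBatchInput batch(1, features.size());
//   for (size_t i = 0; i < features.size(); i++) batch(0, i) = features[i];
//   std::vector<std::vector<float>> out = model.runONNXInference(batch);
//   // out[0] holds the same values as model.runONNXInference(features)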

// Batched inference for a model with one input node and one output node.
// Returns one vector of output values per batch row. Signature reconstructed
// as above.
std::vector<std::vector<float>> OnnxRuntimeBase::runONNXInference(
    NetworkBatchInput& inputTensorValues) const {
  int batchSize = inputTensorValues.rows();
  // Mutable local copies of the cached node shapes (declarations
  // reconstructed), needed because a dynamic batch axis is patched in below.
  std::vector<int64_t> inputNodeDims = m_inputNodeDims;
  std::vector<int64_t> outputNodeDims = m_outputNodeDims;

  // A leading dimension of -1 marks a dynamic batch axis: bind it to the
  // actual batch size.
  if (inputNodeDims[0] == -1) {
    inputNodeDims[0] = batchSize;
  }
  if (outputNodeDims[0] == -1) {
    outputNodeDims[0] = batchSize;
  }

  // The column count must match either a flattened rank-3 input
  // (dims[1] * dims[2]) or a plain rank-2 input (dims[1]). Only read dims[2]
  // when it exists; the original indexed it unconditionally, which is out of
  // bounds for rank-2 inputs.
  int64_t flatInputSize = inputNodeDims.size() > 2
                              ? inputNodeDims[1] * inputNodeDims[2]
                              : inputNodeDims[1];
  if (flatInputSize != inputTensorValues.cols() &&
      inputNodeDims[1] != inputTensorValues.cols()) {
    throw std::runtime_error(
        "runONNXInference: feature size doesn't match the input size: "
        "inputSize required: " + std::to_string(flatInputSize) +
        " inputSize provided: " + std::to_string(inputTensorValues.cols()));
  }
  if (batchSize != 1 &&
      (inputNodeDims[0] != batchSize || outputNodeDims[0] != batchSize)) {
    throw std::runtime_error(
        "runONNXInference: batch size doesn't match the input or output node "
        "size");
  }
  // Wrap the Eigen buffer in an ONNX Runtime tensor; this is a zero-copy
  // view and relies on the row-major layout noted above.
  Ort::MemoryInfo memoryInfo =
      Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
  Ort::Value inputTensor = Ort::Value::CreateTensor<float>(
      memoryInfo, inputTensorValues.data(), inputTensorValues.size(),
      inputNodeDims.data(), inputNodeDims.size());
  if (!inputTensor.IsTensor()) {
    throw std::runtime_error(
        "runONNXInference: conversion of input to Tensor failed.");
  }

  // Score the model. The Run() call itself is elided in the original
  // excerpt; this is the standard C++-API invocation, reconstructed.
  Ort::RunOptions run_options;
  std::vector<Ort::Value> outputTensors =
      m_session->Run(run_options, m_inputNodeNames.data(), &inputTensor,
                     m_inputNodeNames.size(), m_outputNodeNames.data(),
                     m_outputNodeNames.size());
  if (!outputTensors[0].IsTensor() ||
      (outputTensors.size() != m_outputNodeNames.size())) {
    throw std::runtime_error(
        "runONNXInference: calculation of output failed.");
  }
  // Copy the raw output of the (single) output node into per-row vectors.
  // nValues guards rank-1 outputs consistently; the original mixed an
  // unguarded outputNodeDims[1] with a guarded one.
  float* outputTensor = outputTensors.front().GetTensorMutableData<float>();
  int nValues = outputNodeDims.size() > 1 ? outputNodeDims[1] : 1;
  std::vector<std::vector<float>> outputTensorValues(
      batchSize, std::vector<float>(nValues, -1));
  for (int i = 0; i < outputNodeDims[0]; i++) {
    for (int j = 0; j < nValues; j++) {
      outputTensorValues[i][j] = outputTensor[i * nValues + j];
    }
  }
  return outputTensorValues;
}
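
// Batched usage sketch (hypothetical sizes; `model` as above). Each row of
// the input matrix is one entry, each inner vector of the result is that
// entry's output values:
//
//   NetworkBatchInput batch(nEntries, nFeatures);
//   // ... fill batch row by row ...
//   std::vector<std::vector<float>> outputs = model.runONNXInference(batch);
//   float firstScore = outputs[0][0];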

// Batched inference for a model with several output nodes. Returns one Eigen
// matrix (batchSize x width) per output node, keyed by output index. The
// signature is elided in the original; the name runONNXInferenceMultiOutput
// is an assumption (note that the exception messages below still say
// "runONNXInference").
std::map<int, Eigen::MatrixXf> OnnxRuntimeBase::runONNXInferenceMultiOutput(
    NetworkBatchInput& inputTensorValues) const {
  const int batchSize = inputTensorValues.rows();
  std::vector<int64_t> inputNodeDims = m_inputNodeDims;    // reconstructed
  std::vector<int64_t> outputNodeDims = m_outputNodeDims;  // reconstructed

  // Bind dynamic batch axes, as in the single-output overload.
  if (inputNodeDims[0] == -1) {
    inputNodeDims[0] = batchSize;
  }
  if (outputNodeDims[0] == -1) {
    outputNodeDims[0] = batchSize;
  }

  if (inputNodeDims[1] != inputTensorValues.cols()) {
    throw std::runtime_error(
        "runONNXInference: feature size doesn't match the input size: "
        "inputSize required: " + std::to_string(inputNodeDims[1]) +
        " inputSize provided: " + std::to_string(inputTensorValues.cols()));
  }
  if (batchSize != 1 &&
      (inputNodeDims[0] != batchSize || outputNodeDims[0] != batchSize)) {
    throw std::runtime_error(
        "runONNXInference: batch size doesn't match the input or output node "
        "size");
  }
  // Input tensor creation and the Run() call mirror the single-output
  // overload (the Run() call is again reconstructed from elided lines).
  Ort::MemoryInfo memoryInfo =
      Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
  Ort::Value inputTensor = Ort::Value::CreateTensor<float>(
      memoryInfo, inputTensorValues.data(), inputTensorValues.size(),
      inputNodeDims.data(), inputNodeDims.size());
  if (!inputTensor.IsTensor()) {
    throw std::runtime_error(
        "runONNXInference: conversion of input to Tensor failed.");
  }

  Ort::RunOptions run_options;
  std::vector<Ort::Value> outputTensors =
      m_session->Run(run_options, m_inputNodeNames.data(), &inputTensor,
                     m_inputNodeNames.size(), m_outputNodeNames.data(),
                     m_outputNodeNames.size());
  if (!outputTensors[0].IsTensor() ||
      (outputTensors.size() != m_outputNodeNames.size())) {
    throw std::runtime_error(
        "runONNXInference: calculation of output failed.");
  }
  // Collect each output node into its own (batchSize x nNodes) matrix.
  std::map<int, Eigen::MatrixXf> outputTensorMap;
  size_t numOutputNodes = m_session->GetOutputCount();
  for (size_t i = 0; i < numOutputNodes; i++) {
    float* output = outputTensors.at(i).GetTensorMutableData<float>();
    // Each output node can have its own shape, so query it per node.
    Ort::TypeInfo outputTypeInfo = m_session->GetOutputTypeInfo(i);
    auto outputTensorInfo = outputTypeInfo.GetTensorTypeAndShapeInfo();
    outputNodeDims = outputTensorInfo.GetShape();

    int nNodes = outputNodeDims.size() > 1 ? outputNodeDims[1] : 1;
    Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic> batchMatrix(
        batchSize, nNodes);
    for (int j = 0; j < batchSize; j++) {
      Eigen::VectorXf vec(nNodes);
      for (int k = 0; k < nNodes; k++) {
        // Index with nNodes, not outputNodeDims[1]: the latter is out of
        // bounds for rank-1 outputs (fixed from the original).
        vec(k) = output[j * nNodes + k];
      }
      // .transpose() added: Eigen does not assign a column vector to a
      // matrix row directly.
      batchMatrix.row(j) = vec.transpose();
    }
    outputTensorMap[i] = std::move(batchMatrix);
  }
  return outputTensorMap;
}
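
// Multi-output usage sketch (function name assumed, see above). The map is
// keyed by output-node index; each value is a (batchSize x width) matrix:
//
//   std::map<int, Eigen::MatrixXf> outs =
//       model.runONNXInferenceMultiOutput(batch);
//   Eigen::MatrixXf& firstOutput = outs[0];
//   float v = firstOutput(0, 0);  // entry 0, output value 0 of node 0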