// NOTE(review): this whole chunk is a mangled extraction — the bare leading
// integers (20, 23, ...) are the original file's line numbers fused into the
// text, and statements are split mid-expression. Code is left byte-identical;
// only comments are added.
// Create the ONNX Runtime environment (log warnings and above, empty log id).
20 m_env = std::make_unique< Ort::Env >(ORT_LOGGING_LEVEL_WARNING,
"");
// Session configuration: a single intra-op thread and extended graph
// optimizations. Presumably passed to the Ort::Session constructor on a line
// not visible in this chunk — TODO confirm.
23 Ort::SessionOptions session_options;
25 session_options.SetIntraOpNumThreads(1);
26 session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
// Default allocator, used when querying node names from the session.
30 Ort::AllocatorWithDefaultOptions allocator;
// Number of input nodes exposed by the loaded model.
32 size_t numInputNodes =
m_session->GetInputCount();
// For each input node: copy its name into a heap buffer and read its
// tensor type/shape metadata.
// NOTE(review): `name` is used below but its declaration is not visible in
// this chunk — presumably an allocated-name handle (e.g. from
// GetInputNameAllocated) obtained on a line lost in extraction; verify.
// NOTE(review): raw `new char[]` + strcpy leaks unless deleted elsewhere;
// std::string storage (with a parallel vector of c_str() pointers for the
// Run call) would avoid manual ownership — flagged, not changed here.
34 for (
size_t i = 0;
i < numInputNodes;
i++)
37 char* input_name =
new char[strlen(
name.get()) + 1];
38 strcpy(input_name,
name.get());
// Per-input type info; the code that consumes tensorInfo (e.g. storing the
// input shape) is not visible in this chunk.
43 Ort::TypeInfo inputTypeInfo =
m_session->GetInputTypeInfo(
i);
44 auto tensorInfo = inputTypeInfo.GetTensorTypeAndShapeInfo();
// Number of output nodes exposed by the loaded model.
48 size_t numOutputNodes =
m_session->GetOutputCount();
// Mirror of the input loop: copy each output node's name into a heap buffer
// and read its tensor type/shape metadata.
// NOTE(review): same concerns as the input loop — `name` is declared on a
// line missing from this chunk, and the raw `new char[]` + strcpy buffer
// leaks unless freed elsewhere; flagged, not changed here.
50 for (
size_t i = 0;
i < numOutputNodes;
i++)
53 char* output_name =
new char[strlen(
name.get()) + 1];
54 strcpy(output_name,
name.get());
// Per-output type info; the consumer of tensorInfo is not visible here.
58 Ort::TypeInfo outputTypeInfo =
m_session->GetOutputTypeInfo(
i);
59 auto tensorInfo = outputTypeInfo.GetTensorTypeAndShapeInfo();
// Fragment of a single-sample convenience overload: copy the flat input
// values into row 0 of a one-row matrix (vectorInput), then — on lines not
// visible here — presumably run the batched inference and return the first
// (only) row of the result. Declarations of vectorInput/vectorOutput and the
// inference call itself are missing from this chunk — TODO confirm.
68 for (
size_t i = 0;
i < inputTensorValues.size();
i++) {
69 vectorInput(0,
i) = inputTensorValues[
i];
72 return vectorOutput[0];
// Fragment of the batched inference path. Input is an Eigen row-major-style
// matrix: one row per sample, columns are flattened features.
79 int batchSize = inputTensorValues.rows();
// Replace a dynamic batch dimension (-1 in the model) with the actual batch.
85 if (inputNodeDims[0] == -1)
87 inputNodeDims[0] = batchSize;
89 if (outputNodeDims[0] == -1)
91 outputNodeDims[0] = batchSize;
// Feature-count sanity check: this overload expects the model input to be
// (batch, dim1, dim2) with dim1*dim2 features per sample.
94 if(inputNodeDims[1]*inputNodeDims[2] != inputTensorValues.cols())
96 throw std::runtime_error(
"runONNXInference: feature size doesn't match the input size: inputSize required: " +
std::to_string(inputNodeDims[1]*inputNodeDims[2]) +
" inputSize provided: " +
std::to_string(inputTensorValues.cols()));
// Batch-size consistency check (skipped for batchSize == 1, where the model
// may declare a fixed batch of 1).
99 if (batchSize != 1 && (inputNodeDims[0] != batchSize || outputNodeDims[0] != batchSize))
101 throw std::runtime_error(
"runONNXInference: batch size doesn't match the input or output node size");
// Wrap the Eigen buffer in an Ort tensor (CPU memory, no copy) — requires
// the Eigen storage layout to match what the model expects; TODO confirm
// row-major vs column-major ordering at the call site.
106 Ort::MemoryInfo memoryInfo = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
107 Ort::Value inputTensor = Ort::Value::CreateTensor<float>(memoryInfo, inputTensorValues.data(), inputTensorValues.size(),inputNodeDims.data(), inputNodeDims.size());
109 if (!inputTensor.IsTensor())
111 throw std::runtime_error(
"runONNXInference: conversion of input to Tensor failed. ");
114 Ort::RunOptions run_options;
// NOTE(review): the right-hand side of this initialization — the actual
// m_session->Run(...) call (original lines ~116-120) — is missing from this
// chunk; the statement is incomplete as shown.
115 std::vector<Ort::Value> outputTensors =
121 if (!outputTensors[0].IsTensor() || (outputTensors.size() !=
m_outputNodeNames.size())) {
122 throw std::runtime_error(
"runONNXInference: calculation of output failed. ")
126 float* outputTensor = outputTensors.front().GetTensorMutableData<
float>();
// Copy the flat output buffer into a per-sample vector-of-vectors.
// NOTE(review): outputNodeDims[1] is read here and in the index arithmetic
// below even though the inner-loop bound guards outputNodeDims.size() > 1 —
// out-of-bounds if the model output is 1-D; flagged, not changed here.
128 std::vector<std::vector<float>> outputTensorValues(batchSize, std::vector<float>(outputNodeDims[1], -1));
129 for (
int i = 0;
i < outputNodeDims[0];
i++)
131 for (
int j = 0; j < ((outputNodeDims.size() > 1) ? outputNodeDims[1] : 1); j++)
133 outputTensorValues[
i][j] = outputTensor[
i * outputNodeDims[1] + j];
137 return outputTensorValues;
// Fragment of the multi-output inference path: same validation and tensor
// setup as the batched overload, but collects every output node into a map
// keyed by output index, with one Eigen matrix (batch x nodes) per output.
145 const int batchSize = inputTensorValues.rows();
// Replace a dynamic batch dimension (-1 in the model) with the actual batch.
151 if (inputNodeDims[0] == -1)
153 inputNodeDims[0] = batchSize;
155 if (outputNodeDims[0] == -1)
157 outputNodeDims[0] = batchSize;
// Feature-count sanity check: this overload expects (batch, features),
// i.e. a 2-D model input — unlike the 3-D check in the other overload.
160 if(inputNodeDims[1] != inputTensorValues.cols())
162 throw std::runtime_error(
"runONNXInference: feature size doesn't match the input size: inputSize required: " +
std::to_string(inputNodeDims[1]) +
" inputSize provided: " +
std::to_string(inputTensorValues.cols()));
165 if (batchSize != 1 &&(inputNodeDims[0] != batchSize || outputNodeDims[0] != batchSize))
167 throw std::runtime_error(
"runONNXInference: batch size doesn't match the input or output node size");
// Wrap the Eigen buffer in an Ort tensor (CPU memory, no copy).
171 Ort::MemoryInfo memoryInfo = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
172 Ort::Value inputTensor = Ort::Value::CreateTensor<float>(memoryInfo, inputTensorValues.data(), inputTensorValues.size(), inputNodeDims.data(), inputNodeDims.size());
174 if (!inputTensor.IsTensor())
176 throw std::runtime_error(
"runONNXInference: conversion of input to Tensor failed. ");
179 Ort::RunOptions run_options;
// NOTE(review): as in the other overload, the m_session->Run(...) call that
// should initialize outputTensors (original lines ~181-185) is missing from
// this chunk; the statement is incomplete as shown.
180 std::vector<Ort::Value> outputTensors =
186 if (!outputTensors[0].IsTensor() || (outputTensors.size() !=
m_outputNodeNames.size())) {
187 throw std::runtime_error(
"runONNXInference: calculation of output failed. ");
// Collect every output node into the result map.
// NOTE(review): the map key is int but `i` is size_t — implicit narrowing;
// flagged, not changed here.
191 std::map<int, Eigen::MatrixXf> outputTensorMap;
192 size_t numOutputNodes =
m_session->GetOutputCount();
193 for (
size_t i=0;
i<numOutputNodes;
i++){
// Raw float buffer of the i-th output tensor.
196 float*
output = outputTensors.at(
i).GetTensorMutableData<
float>();
// Re-query this output's shape; outputNodeDims is overwritten per node.
197 Ort::TypeInfo outputTypeInfo =
m_session->GetOutputTypeInfo(
i);
198 auto outputTensorInfo = outputTypeInfo.GetTensorTypeAndShapeInfo();
201 outputNodeDims = outputTensorInfo.GetShape();
// Nodes per sample for this output; 1 if the output is 1-D.
203 int nNodes = outputNodeDims.size() > 1 ? outputNodeDims[1] : 1;
204 Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic> batchMatrix(batchSize, nNodes);
205 for (
int j = 0; j < batchSize; j++)
207 Eigen::VectorXf
vec(nNodes);
208 for (
int k = 0;
k<nNodes;
k++)
// NOTE(review): outputNodeDims[1] is indexed here even when nNodes fell
// back to 1 for a 1-D output — out-of-bounds in that case; and the
// `vec(k) = val` store (original lines ~211-212) is missing from this
// chunk, so `val` appears unused as shown.
210 float val =
output[j * outputNodeDims[1] +
k];
213 batchMatrix.row(j) =
vec;
215 outputTensorMap[
i] = batchMatrix;
217 return outputTensorMap;