{
  const int batchSize = inputTensorValues.rows();
  // Local copies of the node dimensions, so the dynamic batch dimension can be
  // patched per call (assumes the model dimensions are cached in
  // m_inputNodeDims / m_outputNodeDims members, analogous to m_outputNodeNames).
  std::vector<int64_t> inputNodeDims = m_inputNodeDims;
  std::vector<int64_t> outputNodeDims = m_outputNodeDims;
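
  // The ONNX model may declare a dynamic batch dimension (-1); bind it to the
  // batch size of this call.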
  if (inputNodeDims[0] == -1)
  {
    inputNodeDims[0] = batchSize;
  }
  if (outputNodeDims[0] == -1)
  {
    outputNodeDims[0] = batchSize;
  }

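  // Sanity checks: the feature count and the batch dimensions must match what
  // the model expects.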
  if (inputNodeDims[1] != inputTensorValues.cols())
  {
    throw std::runtime_error(
        "runONNXInference: feature size doesn't match the input size: inputSize required: " +
        std::to_string(inputNodeDims[1]) +
        " inputSize provided: " + std::to_string(inputTensorValues.cols()));
  }

  if (batchSize != 1 && (inputNodeDims[0] != batchSize || outputNodeDims[0] != batchSize))
  {
    throw std::runtime_error("runONNXInference: batch size doesn't match the input or output node size");
  }

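  // Wrap the Eigen input buffer in an ONNX Runtime tensor (CPU memory, no copy).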
  Ort::MemoryInfo memoryInfo = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
  Ort::Value inputTensor = Ort::Value::CreateTensor<float>(
      memoryInfo, inputTensorValues.data(), inputTensorValues.size(),
      inputNodeDims.data(), inputNodeDims.size());

  if (!inputTensor.IsTensor())
  {
    throw std::runtime_error("runONNXInference: conversion of input to Tensor failed. ");
  }

  Ort::RunOptions run_options;
  // Run the model on the single input tensor (assumes the input node names are
  // cached in an m_inputNodeNames member, analogous to m_outputNodeNames).
  std::vector<Ort::Value> outputTensors =
      m_session->Run(run_options, m_inputNodeNames.data(), &inputTensor, 1,
                     m_outputNodeNames.data(), m_outputNodeNames.size());

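  // Expect one tensor back per requested output node.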
  if (!outputTensors[0].IsTensor() || (outputTensors.size() != m_outputNodeNames.size())) {
    throw std::runtime_error("runONNXInference: calculation of output failed. ");
  }

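  // Collect each output node into a (batchSize x nNodes) Eigen matrix, keyed by
  // the output node index.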
  std::map<int, Eigen::MatrixXf> outputTensorMap;
  size_t numOutputNodes = m_session->GetOutputCount();
  for (size_t i = 0; i < numOutputNodes; i++) {
    float* output = outputTensors.at(i).GetTensorMutableData<float>();
    Ort::TypeInfo outputTypeInfo = m_session->GetOutputTypeInfo(i);
    auto outputTensorInfo = outputTypeInfo.GetTensorTypeAndShapeInfo();
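
    // The node's shape: the second dimension (when present) is the number of
    // output values per batch entry.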
    outputNodeDims = outputTensorInfo.GetShape();
    int nNodes = outputNodeDims.size() > 1 ? outputNodeDims[1] : 1;
    Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic> batchMatrix(batchSize, nNodes);
    for (int j = 0; j < batchSize; j++)
    {
      Eigen::VectorXf vec(nNodes);
      for (int k = 0; k < nNodes; k++)
      {
        // The output tensor is row-major: entry k of batch row j sits at j * nNodes + k.
        float val = output[j * nNodes + k];
        vec(k) = val;
      }
      batchMatrix.row(j) = vec;
    }
    outputTensorMap[i] = std::move(batchMatrix);
  }
  return outputTensorMap;
}