10#include <onnxruntime_cxx_api.h>
// Flatten a nested (2D) vector into a single contiguous 1D vector.
//
// @param features  outer vector of inner vectors to concatenate
// @return a std::vector<T> holding every element of `features`, in
//         outer-then-inner (row-major) order
template <typename T>
inline std::vector<T> flattenNestedVectors(const std::vector<std::vector<T> >& features) {
  // Pre-compute the total element count so the output buffer is
  // allocated exactly once (avoids repeated reallocation on push_back).
  std::size_t total_size = 0;
  for (const auto& feature : features) total_size += feature.size();

  std::vector<T> flatten1D;
  flatten1D.reserve(total_size);

  // Bulk-append each inner vector; equivalent to the element-wise
  // push_back loop but lets the library copy each range in one go.
  for (const auto& feature : features) {
    flatten1D.insert(flatten1D.end(), feature.begin(), feature.end());
  }

  return flatten1D;
}
41 const Ort::Session& session,
42 std::vector<std::vector<int64_t> >& dataShape,
43 std::vector<std::string>& nodeNames);
51 const Ort::Session& session,
52 std::vector<std::vector<int64_t> >& dataShape,
53 std::vector<std::string>& nodeNames);
57 const Ort::Session& session,
58 std::vector<std::vector<int64_t> >& dataShape,
59 std::vector<std::string>& nodeNames,
70 const std::vector<std::string>& inputNames,
71 const std::vector<Ort::Value>& inputData,
72 const std::vector<std::string>& outputNames,
73 const std::vector<Ort::Value>& outputData
80 Ort::MemoryInfo memoryInfo = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
81 return Ort::Value::CreateTensor<T>(memoryInfo,
data.data(),
data.size(), dataShape.data(), dataShape.size());
char data[hepevt_bytes_allocation_ATLAS]
void inferenceWithIOBinding(Ort::Session &session, const std::vector< std::string > &inputNames, const std::vector< Ort::Value > &inputData, const std::vector< std::string > &outputNames, const std::vector< Ort::Value > &outputData)
int64_t getTensorSize(const std::vector< int64_t > &dataShape)
void getNodeInfo(const Ort::Session &session, std::vector< std::vector< int64_t > > &dataShape, std::vector< std::string > &nodeNames, bool isInput)
void getOutputNodeInfo(const Ort::Session &session, std::vector< std::vector< int64_t > > &dataShape, std::vector< std::string > &nodeNames)
void getInputNodeInfo(const Ort::Session &session, std::vector< std::vector< int64_t > > &dataShape, std::vector< std::string > &nodeNames)
std::vector< T > flattenNestedVectors(const std::vector< std::vector< T > > &features)
Ort::Value createTensor(std::vector< T > &data, const std::vector< int64_t > &dataShape)