// Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
template <typename T>
Ort::Value AthOnnx::IOnnxRuntimeInferenceTool::createTensor(std::vector<T>& data, const std::vector<int64_t>& dataShape, int64_t batchSize) const
{
    // Replace the dynamic (-1) axis, if any, with the requested batch size.
    std::vector<int64_t> dataShapeCopy = dataShape;
    if (batchSize > 0) {
        for (auto& shape: dataShapeCopy) {
            if (shape == -1) { shape = batchSize; break; }
        }
    }
    auto memoryInfo = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
    return Ort::Value::CreateTensor<T>(memoryInfo, data.data(), data.size(), dataShapeCopy.data(), dataShapeCopy.size());
}

template <typename T>
StatusCode AthOnnx::IOnnxRuntimeInferenceTool::addInput(std::vector<Ort::Value>& inputTensors, std::vector<T>& data, unsigned idx, int64_t batchSize) const
{
    if (idx >= m_numInputs) {
        return StatusCode::FAILURE;
    }
    inputTensors.push_back(std::move(createTensor(data, m_inputShapes[idx], batchSize)));
    return StatusCode::SUCCESS;
}

template <typename T>
StatusCode AthOnnx::IOnnxRuntimeInferenceTool::addOutput(std::vector<Ort::Value>& outputTensors, std::vector<T>& data, unsigned idx, int64_t batchSize) const
{
    if (idx >= m_numOutputs) {
        return StatusCode::FAILURE;
    }
    // Element count of this output; a dynamic (-1) axis makes the product negative and is rescaled by the batch size.
    auto tensorSize = std::accumulate(m_outputShapes[idx].begin(), m_outputShapes[idx].end(), int64_t{1}, std::multiplies<int64_t>());
    if (tensorSize < 0) {
        tensorSize = std::abs(tensorSize) * batchSize;
    }
    data.resize(tensorSize);
    outputTensors.push_back(std::move(createTensor(data, m_outputShapes[idx], batchSize)));
    return StatusCode::SUCCESS;
}
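
// Usage sketch (illustrative only, not part of this file): how a client algorithm
// might combine these helpers. Names such as m_onnxTool, prepareInput, inputData and
// outputScores are hypothetical, and an inference(inputTensors, outputTensors)
// method on the tool is assumed.
//
//   std::vector<Ort::Value> inputTensors, outputTensors;
//   std::vector<float> inputData = prepareInput();    // flattened, batchSize * nFeatures
//   std::vector<float> outputScores;                  // resized by addOutput
//   ATH_CHECK( m_onnxTool->addInput(inputTensors, inputData, 0, batchSize) );
//   ATH_CHECK( m_onnxTool->addOutput(outputTensors, outputScores, 0, batchSize) );
//   ATH_CHECK( m_onnxTool->inference(inputTensors, outputTensors) );
//   // outputScores now holds the model prediction for every element of the batch.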