ATLAS Offline Software
Classes | Functions
AthOnnx Namespace Reference

Namespace holding all of the ONNX Runtime example code. More...

Classes

class  EvaluateModel
 Algorithm demonstrating the usage of the ONNX Runtime C++ API. More...
 
class  IOnnxRuntimeInferenceTool
 Interface class for creating ONNX Runtime sessions. More...
 
class  IOnnxRuntimeSessionTool
 
class  IOnnxRuntimeSvc
 Service used for managing global objects used by ONNX Runtime. More...
 
class  OnnxRuntimeInferenceTool
 
class  OnnxRuntimeSessionToolCPU
 
class  OnnxRuntimeSessionToolCUDA
 
class  OnnxRuntimeSvc
 Service implementing AthOnnx::IOnnxRuntimeSvc. More...
 

Functions

std::vector< std::vector< std::vector< float > > > read_mnist_pixel_notFlat (const std::string &full_path)
 
std::vector< int > read_mnist_label (const std::string &full_path)
 
template<typename T >
std::vector< T > flattenNestedVectors (const std::vector< std::vector< T >> &features)
 
void getInputNodeInfo (const Ort::Session &session, std::vector< std::vector< int64_t > > &dataShape, std::vector< std::string > &nodeNames)
 
void getOutputNodeInfo (const Ort::Session &session, std::vector< std::vector< int64_t > > &dataShape, std::vector< std::string > &nodeNames)
 
void getNodeInfo (const Ort::Session &session, std::vector< std::vector< int64_t > > &dataShape, std::vector< std::string > &nodeNames, bool isInput)
 
int64_t getTensorSize (const std::vector< int64_t > &dataShape)
 
void inferenceWithIOBinding (Ort::Session &session, const std::vector< std::string > &inputNames, const std::vector< Ort::Value > &inputData, const std::vector< std::string > &outputNames, const std::vector< Ort::Value > &outputData)
 
Ort::Value createTensor (std::vector< float > &data, const std::vector< int64_t > &dataShape)
 

Detailed Description

Namespace holding all of the ONNX Runtime example code.

Function Documentation

◆ createTensor()

Ort::Value AthOnnx::createTensor ( std::vector< float > &  data,
const std::vector< int64_t > &  dataShape 
)

Definition at line 81 of file OnnxUtils.cxx.

82 {
83  auto memoryInfo = Ort::MemoryInfo::CreateCpu(OrtAllocatorType::OrtArenaAllocator, OrtMemType::OrtMemTypeDefault);
84 
85  return Ort::Value::CreateTensor<float>(
86  memoryInfo,
87  data.data(),
88  data.size(),
89  dataShape.data(),
90  dataShape.size());
91 };

◆ flattenNestedVectors()

template<typename T >
std::vector<T> AthOnnx::flattenNestedVectors ( const std::vector< std::vector< T >> &  features)
inline

Definition at line 20 of file OnnxUtils.h.

20  {
21  // 1. Compute the total size required.
22  int total_size = 0;
23  for (const auto& feature : features) total_size += feature.size();
24 
25  std::vector<T> flatten1D;
26  flatten1D.reserve(total_size);
27 
28  for (const auto& feature : features)
29  for (const auto& elem : feature)
30  flatten1D.push_back(elem);
31 
32  return flatten1D;
33 }

◆ getInputNodeInfo()

void AthOnnx::getInputNodeInfo ( const Ort::Session &  session,
std::vector< std::vector< int64_t > > &  dataShape,
std::vector< std::string > &  nodeNames 
)

Definition at line 33 of file OnnxUtils.cxx.

37  {
38  getNodeInfo(session, dataShape, nodeNames, true);
39 }

◆ getNodeInfo()

void AthOnnx::getNodeInfo ( const Ort::Session &  session,
std::vector< std::vector< int64_t > > &  dataShape,
std::vector< std::string > &  nodeNames,
bool  isInput 
)

Definition at line 9 of file OnnxUtils.cxx.

14  {
15  dataShape.clear();
16  nodeNames.clear();
17 
18  size_t numNodes = isInput? session.GetInputCount(): session.GetOutputCount();
19  dataShape.reserve(numNodes);
20  nodeNames.reserve(numNodes);
21 
22  Ort::AllocatorWithDefaultOptions allocator;
23  for( std::size_t i = 0; i < numNodes; i++ ) {
24  Ort::TypeInfo typeInfo = isInput? session.GetInputTypeInfo(i): session.GetOutputTypeInfo(i);
25  auto tensorInfo = typeInfo.GetTensorTypeAndShapeInfo();
26  dataShape.emplace_back(tensorInfo.GetShape());
27 
28  auto nodeName = isInput? session.GetInputNameAllocated(i, allocator) : session.GetOutputNameAllocated(i, allocator);
29  nodeNames.emplace_back(nodeName.get());
30  }
31 }

◆ getOutputNodeInfo()

void AthOnnx::getOutputNodeInfo ( const Ort::Session &  session,
std::vector< std::vector< int64_t > > &  dataShape,
std::vector< std::string > &  nodeNames 
)

Definition at line 41 of file OnnxUtils.cxx.

45  {
46  getNodeInfo(session, dataShape, nodeNames, false);
47 }

◆ getTensorSize()

int64_t AthOnnx::getTensorSize ( const std::vector< int64_t > &  dataShape)

Definition at line 73 of file OnnxUtils.cxx.

73  {
74  int64_t size = 1;
75  for (const auto& dim : dataShape) {
76  size *= dim;
77  }
78  return size;
79 }

◆ inferenceWithIOBinding()

void AthOnnx::inferenceWithIOBinding ( Ort::Session &  session,
const std::vector< std::string > &  inputNames,
const std::vector< Ort::Value > &  inputData,
const std::vector< std::string > &  outputNames,
const std::vector< Ort::Value > &  outputData 
)

Definition at line 49 of file OnnxUtils.cxx.

53  {
54 
55  if (inputNames.empty()) {
56  throw std::runtime_error("Onnxruntime input data mapping cannot be empty");
57  }
58  assert(inputNames.size() == inputData.size());
59 
60  Ort::IoBinding iobinding(session);
61  for(size_t idx = 0; idx < inputNames.size(); ++idx){
62  iobinding.BindInput(inputNames[idx].data(), inputData[idx]);
63  }
64 
65 
66  for(size_t idx = 0; idx < outputNames.size(); ++idx){
67  iobinding.BindOutput(outputNames[idx].data(), outputData[idx]);
68  }
69 
70  session.Run(Ort::RunOptions{nullptr}, iobinding);
71 }

◆ read_mnist_label()

std::vector<int> AthOnnx::read_mnist_label ( const std::string &  full_path)

Definition at line 51 of file EvaluateModel.cxx.

52  {
53  std::vector<int> output_tensor_values(1*10000);
54  std::ifstream file (full_path.c_str(), std::ios::binary);
55  int magic_number=0;
56  int number_of_labels=0;
57  file.read((char*)&magic_number,sizeof(magic_number));
58  magic_number= ntohl(magic_number);
59  file.read((char*)&number_of_labels,sizeof(number_of_labels));
60  number_of_labels= ntohl(number_of_labels);
61  for(int i=0;i<number_of_labels;++i)
62  {
63  unsigned char temp=0;
64  file.read((char*)&temp,sizeof(temp));
65  output_tensor_values[i]= int(temp);
66  }
67  return output_tensor_values;
68  }

◆ read_mnist_pixel_notFlat()

std::vector<std::vector<std::vector<float> > > AthOnnx::read_mnist_pixel_notFlat ( const std::string &  full_path)

Definition at line 17 of file EvaluateModel.cxx.

18  {
19  std::vector<std::vector<std::vector<float>>> input_tensor_values;
20  input_tensor_values.resize(10000, std::vector<std::vector<float> >(28,std::vector<float>(28)));
21  std::ifstream file (full_path.c_str(), std::ios::binary);
22  int magic_number=0;
23  int number_of_images=0;
24  int n_rows=0;
25  int n_cols=0;
26  file.read((char*)&magic_number,sizeof(magic_number));
27  magic_number= ntohl(magic_number);
28  file.read((char*)&number_of_images,sizeof(number_of_images));
29  number_of_images= ntohl(number_of_images);
30  file.read((char*)&n_rows,sizeof(n_rows));
31  n_rows= ntohl(n_rows);
32  file.read((char*)&n_cols,sizeof(n_cols));
33  n_cols= ntohl(n_cols);
34  for(int i=0;i<number_of_images;++i)
35  {
36  for(int r=0;r<n_rows;++r)
37  {
38  for(int c=0;c<n_cols;++c)
39  {
40  unsigned char temp=0;
41  file.read((char*)&temp,sizeof(temp));
42  input_tensor_values[i][r][c]= float(temp)/255;
43  }
44  }
45  }
46  return input_tensor_values;
47  }
beamspotman.r
def r
Definition: beamspotman.py:676
data
char data[hepevt_bytes_allocation_ATLAS]
Definition: HepEvt.cxx:11
yodamerge_tmp.dim
dim
Definition: yodamerge_tmp.py:239
CaloCellPos2Ntuple.int
int
Definition: CaloCellPos2Ntuple.py:24
python.setupRTTAlg.size
int size
Definition: setupRTTAlg.py:39
lumiFormat.i
int i
Definition: lumiFormat.py:92
file
TFile * file
Definition: tile_monitor.h:29
AthOnnx::getNodeInfo
void getNodeInfo(const Ort::Session &session, std::vector< std::vector< int64_t > > &dataShape, std::vector< std::string > &nodeNames, bool isInput)
Definition: OnnxUtils.cxx:9
XMLtoHeader.outputNames
outputNames
Definition: XMLtoHeader.py:18
LArNewCalib_DelayDump_OFC_Cali.idx
idx
Definition: LArNewCalib_DelayDump_OFC_Cali.py:69
python.compressB64.c
def c
Definition: compressB64.py:93
readCCLHist.float
float
Definition: readCCLHist.py:83