#include <OnnxUtil.h>
◆ OnnxUtil()
OnnxUtil::OnnxUtil(const std::string & name)
Definition at line 16 of file FlavorTagDiscriminants/Root/OnnxUtil.cxx.
18 :
m_env (std::make_unique<Ort::Env>(ORT_LOGGING_LEVEL_FATAL,
""))
21 Ort::SessionOptions session_options;
22 session_options.SetIntraOpNumThreads(1);
26 session_options.SetLogSeverityLevel(4);
27 session_options.SetGraphOptimizationLevel(
28 GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
31 Ort::AllocatorWithDefaultOptions allocator;
34 m_session = std::make_unique<Ort::Session>(
35 *
m_env, path_to_onnx.c_str(), session_options);
38 m_metadata = loadMetadata(
"gnn_config");
39 m_num_inputs =
m_session->GetInputCount();
40 m_num_outputs =
m_session->GetOutputCount();
43 if (m_metadata.contains(
"onnx_model_version")) {
44 m_onnx_model_version = m_metadata[
"onnx_model_version"].get<
OnnxModelVersion>();
46 throw std::runtime_error(
"Unknown Onnx model version!");
49 if (m_metadata.contains(
"outputs")){
50 m_onnx_model_version = OnnxModelVersion::V0;
52 throw std::runtime_error(
"Onnx model version not found in metadata");
57 m_model_name = determineModelName();
60 for (
size_t i = 0;
i < m_num_inputs;
i++) {
61 std::string input_name =
m_session->GetInputNameAllocated(
i, allocator).get();
66 for (
size_t i = 0;
i < m_num_outputs;
i++) {
67 const auto name = std::string(
m_session->GetOutputNameAllocated(
i, allocator).get());
68 const auto type =
m_session->GetOutputTypeInfo(
i).GetTensorTypeAndShapeInfo().GetElementType();
69 const int rank =
m_session->GetOutputTypeInfo(
i).GetTensorTypeAndShapeInfo().GetShape().size();
70 if (m_onnx_model_version == OnnxModelVersion::V0) {
71 const OnnxOutput onnxOutput(
name,
type, m_model_name);
72 m_output_nodes.push_back(onnxOutput);
74 const OnnxOutput onnxOutput(
name,
type, rank);
75 m_output_nodes.push_back(onnxOutput);
◆ ~OnnxUtil()
◆ initialize()
void OnnxUtil::initialize()
Definition at line 17 of file JetTagPerformanceCalibration/xAODBTaggingEfficiency/Root/OnnxUtil.cxx.
22 m_env = std::make_unique< Ort::Env >(ORT_LOGGING_LEVEL_WARNING,
"");
25 Ort::SessionOptions session_options;
26 session_options.SetIntraOpNumThreads(1);
27 session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
30 m_session = std::make_unique< Ort::Session >(*
m_env, fullPathToFile.c_str(), session_options);
31 Ort::AllocatorWithDefaultOptions allocator;
34 size_t num_input_nodes =
m_session->GetInputCount();
37 for (std::size_t
i = 0;
i < num_input_nodes;
i++) {
38 auto input_name =
m_session->GetInputNameAllocated(
i, allocator);
43 size_t num_output_nodes =
m_session->GetOutputCount();
44 std::vector<int64_t> output_node_dims;
47 for(std::size_t
i = 0;
i < num_output_nodes;
i++ ) {
48 auto output_name =
m_session->GetOutputNameAllocated(
i, allocator);
52 Ort::TypeInfo type_info =
m_session->GetOutputTypeInfo(
i);
53 auto tensor_info = type_info.GetTensorTypeAndShapeInfo();
55 output_node_dims = tensor_info.GetShape();
◆ runInference() [1/2]
void OnnxUtil::runInference(const std::vector<std::vector<float>> & node_feat, std::vector<float> & effAllJet) const
Definition at line 64 of file JetTagPerformanceCalibration/xAODBTaggingEfficiency/Root/OnnxUtil.cxx.
72 std::vector<float> input_tensor_values;
73 std::vector<int64_t> input_node_dims = {1,
static_cast<int>(node_feat.size()),
static_cast<int>(node_feat.at(0).size())};
75 for (
const auto&
it : node_feat){
76 input_tensor_values.insert(input_tensor_values.end(),
it.begin(),
it.end());
80 auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
81 Ort::Value input_tensor = Ort::Value::CreateTensor<float>(memory_info, input_tensor_values.data(), input_tensor_values.size(), input_node_dims.data(), input_node_dims.size());
98 auto output_tensors = session.Run(Ort::RunOptions{
nullptr}, input_node_names.data(), &input_tensor, input_node_names.size(), output_node_names.data(), output_node_names.size());
101 float* float_ptr = output_tensors.front().GetTensorMutableData<
float>();
102 int num_jets = node_feat.size();
103 effAllJet = {float_ptr, float_ptr + num_jets};
◆ runInference() [2/2]
void OnnxUtil::runInference(const std::vector<std::vector<float>> & node_feat, std::vector<std::vector<float>> & effAllJetAllWp) const
Definition at line 108 of file JetTagPerformanceCalibration/xAODBTaggingEfficiency/Root/OnnxUtil.cxx.
118 std::vector<float> input_tensor_values;
119 std::vector<int64_t> input_node_dims = {1,
static_cast<int>(node_feat.size()),
static_cast<int>(node_feat.at(0).size())};
121 for (
auto&
it : node_feat){
122 input_tensor_values.insert(input_tensor_values.end(),
it.begin(),
it.end());
126 auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
127 Ort::Value input_tensor = Ort::Value::CreateTensor<float>(memory_info, input_tensor_values.data(), input_tensor_values.size(), input_node_dims.data(), input_node_dims.size());
144 auto output_tensors = session.Run(Ort::RunOptions{
nullptr}, input_node_names.data(), &input_tensor, input_node_names.size(), output_node_names.data(), output_node_names.size());
147 float* float_ptr = output_tensors.front().GetTensorMutableData<
float>();
149 int num_jets = node_feat.size();
151 for (
int i=0;
i<num_jets;
i++){
152 std::vector<float> eff_one_jet_tmp;
154 eff_one_jet_tmp.push_back(float_ptr[
i*
m_num_wp+j]);
156 effAllJetAllWp.push_back(eff_one_jet_tmp);
◆ m_env
std::unique_ptr<Ort::Env> OnnxUtil::m_env [private]
◆ m_input_node_names
std::vector<std::string> OnnxUtil::m_input_node_names [private]
◆ m_num_wp
int OnnxUtil::m_num_wp {} [private]
◆ m_output_node_names
std::vector<std::string> OnnxUtil::m_output_node_names [private]
◆ m_path_to_onnx
std::string OnnxUtil::m_path_to_onnx [private]
◆ m_session
std::unique_ptr<Ort::Session> OnnxUtil::m_session [private]
The documentation for this class was generated from the following files: