std::unique_ptr< Ort::Session > CreateORTSession(const std::string& modelFile){
 
    Ort::SessionOptions sessionOptions;
    sessionOptions.SetIntraOpNumThreads( 1 );
    sessionOptions.SetGraphOptimizationLevel( ORT_ENABLE_BASIC );

    std::string serviceName;
 
    #ifdef XAOD_STANDALONE
        using namespace asg::msgUserCode;
        ANA_MSG_WARNING("If running DNN calibration in AnalysisBase: necessary to instantiate the ONNX service AthOnnx::OnnxRuntimeSvc with name OnnxRuntimeSvc");
        ANA_MSG_WARNING("Either in C++ config (see example in JetCalibTools_Example.cxx)");

        ANA_MSG_WARNING("   from AnaAlgorithm.DualUseConfig import createService");
        ANA_MSG_WARNING("   onnxSvc = createService('AthOnnx::OnnxRuntimeSvc', 'OnnxRuntimeSvc', myAlgSequence)");
        serviceName = "OnnxRuntimeSvc";
    #else
        serviceName = "AthOnnx::OnnxRuntimeSvc";
    #endif
 
    return std::make_unique<Ort::Session>( svc->env(),
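    // (the service handle "svc" used above is retrieved in lines omitted from this excerpt;
    //  its env() supplies the shared ONNX Runtime environment for the session)

// Helper: query the ONNX model's input layer, returning the input shape and the input node names.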
 
std::tuple<std::vector<int64_t>, std::vector<const char*> > GetInputNodeInfo(const std::unique_ptr< Ort::Session >& session){
 
    std::vector<int64_t> input_node_dims;
    size_t num_input_nodes = session->GetInputCount();
    std::vector<const char*> input_node_names(num_input_nodes);
    Ort::AllocatorWithDefaultOptions allocator;
 
    for( std::size_t i = 0; i < num_input_nodes; i++ ) {
        char* input_name = session->GetInputNameAllocated(i, allocator).release();
        input_node_names[i] = input_name;

        Ort::TypeInfo type_info = session->GetInputTypeInfo(i);
        auto tensor_info = type_info.GetTensorTypeAndShapeInfo();

        input_node_dims = tensor_info.GetShape();
    }

    return std::make_tuple(input_node_dims, input_node_names);
}
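// Helper: same pattern for the model's output layer.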
 
std::tuple<std::vector<int64_t>, std::vector<const char*> > GetOutputNodeInfo(const std::unique_ptr< Ort::Session >& session){
 
    std::vector<int64_t> output_node_dims;
    size_t num_output_nodes = session->GetOutputCount();
    std::vector<const char*> output_node_names(num_output_nodes);
    Ort::AllocatorWithDefaultOptions allocator;
 
    for( std::size_t i = 0; i < num_output_nodes; i++ ) {
        char* output_name = session->GetOutputNameAllocated(i, allocator).release();
        output_node_names[i] = output_name;

        Ort::TypeInfo type_info = session->GetOutputTypeInfo(i);
        auto tensor_info = type_info.GetTensorTypeAndShapeInfo();

        output_node_dims = tensor_info.GetShape();
    }

    return std::make_tuple(output_node_dims, output_node_names);
}
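// VarAccessorRetriever: a VarRetriever that reads a single jet attribute through an accessor (m_acc)
// and returns it multiplied by the energy scale factor eScale.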
 
    VarAccessorRetriever(const std::string &n): m_acc(n) {}
 
        return m_acc(jet) * eScale;
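// RatioAccessorRetriever: pre-defines accessors for the substructure moments (Tau1/2/3_wta, ...)
// used to build the ratio variables (the initializer list continues in lines omitted here).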
 
    RatioAccessorRetriever():   m_accTau1("Tau1_wta"),
                                m_accTau2("Tau2_wta"),
                                m_accTau3("Tau3_wta"),
 
#define DEF_RETRIEVER0(cname, expr )  struct Var_##cname : public GlobalLargeRDNNCalibration::VarRetriever { float value(const xAOD::Jet& jet, JetEventInfo& , double eScale ) { return expr ; } }
#define DEF_RETRIEVER1(cname, expr )  struct Var_##cname : public GlobalLargeRDNNCalibration::VarRetriever { float value(const xAOD::Jet& , JetEventInfo& jetInfo, double eScale ) { return expr ; } }
#define DEF_RATIO_RETRIEVER(cname, expr )  struct Ratio_##cname : public RatioAccessorRetriever { float value(const xAOD::Jet& jet, JetEventInfo& , double eScale ) { return expr ; } }
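// The macros above generate the concrete retriever structs: DEF_RETRIEVER0 evaluates "expr" on the jet
// itself, DEF_RETRIEVER1 on the JetEventInfo object, and DEF_RATIO_RETRIEVER on a RatioAccessorRetriever.
// The map below associates each variable name used in the config with a factory for its retriever.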
        {"eta",       [](){return new Var_eta();} },
        {"rapidity",  [](){return new Var_rapidity();} },
        {"log_e",     [](){return new Var_log_e();} },
        {"log_m",     [](){return new Var_log_m();} },
        {"Tau21_wta", [](){return new Ratio_Tau21_wta();} },
        {"Tau32_wta", [](){return new Ratio_Tau32_wta();} },
        {"C2",        [](){return new Ratio_C2();} },
        {"D2",        [](){return new Ratio_D2();} },
        {"mu",        [](){return new Var_mu();} },
        {"NPV",       [](){return new Var_NPV();} },
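// Constructor initializer lists: the two minimal constructors leave m_config null and m_calibArea empty;
// the configured constructor stores the config, the calibration area and the dev-mode flag.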
 
  m_config(nullptr), m_calibArea("")

  m_config(nullptr), m_calibArea("")

  m_config(config), m_calibArea(calibArea), m_devMode(dev)
 
    if ( !m_config ) { ATH_MSG_FATAL("Config file not specified. Aborting."); return StatusCode::FAILURE; }
 
        ATH_MSG_FATAL("Misconfiguration of config file: the number of offset/scale parameters does not match the number of features. Will exit");
        return StatusCode::FAILURE;
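    // Resolve the model file, create the ONNX Runtime session, and cache the names and shapes of the
    // model's input and output nodes for use at calibration time.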
 
    std::string modelPath = "";
 
    m_session = CreateORTSession(fullModelPath);
 
    std::tuple<std::vector<int64_t>, std::vector<const char*> > inputInfo = GetInputNodeInfo(m_session);

    std::tuple<std::vector<int64_t>, std::vector<const char*> > outputInfo = GetOutputNodeInfo(m_session);
 
        ATH_MSG_FATAL("DNN input features not the same size as in config, will exit");
        return StatusCode::FAILURE;

    return StatusCode::SUCCESS;
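// Per-jet calibration: start from the current four-momentum, evaluate the DNN, and rescale the jet.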
 
    jetStartP4 = jet.jetP4();

    if( jet.m() <= 0 || jet.numConstituents() == 1 ){
        return StatusCode::SUCCESS;
 
        for ( std::size_t i = 0; i < input_tensor_values.size(); i++ ) ATH_MSG_DEBUG(" " << input_tensor_values[i]);
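    // Guard against NaN/inf in the input features: leave the jet uncalibrated rather than pass bad values to the network.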
 
    int nNan = std::count_if(input_tensor_values.begin(), input_tensor_values.end(), [](float f){return std::isnan(f) || std::isinf(f);});
 
        ATH_MSG_WARNING("Encountered NaN or inf value in input features, will not apply calibration");

        return StatusCode::SUCCESS;
 
    Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeCPU);
 
    Ort::Value input_tensor = Ort::Value::CreateTensor<float>(  memory_info,
                                                                 input_tensor_values.data(),
                                                                 input_tensor_values.size(),
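    // Sanity check: read the tensor contents back and compare against the original input vector.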
 
    std::vector<float> vec(input_tensor.GetTensorMutableData<float>(), input_tensor.GetTensorMutableData<float>() + m_input_node_dims[1]);
    if ( vec != input_tensor_values ) {
        ATH_MSG_WARNING("Input tensor after conversion to Ort tensor is not the same as the input vector, will not apply calibration");

        return StatusCode::SUCCESS;
 
    auto output_tensor =  session.Run(  Ort::RunOptions{nullptr},
 
        ATH_MSG_WARNING("Output tensor does not have the same size as output layer, will not apply calibration");

        return StatusCode::SUCCESS;
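    // The two output nodes hold the predicted energy and mass responses, respectively.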
 
    float* outputE = output_tensor.at(0).GetTensorMutableData<float>();
    float* outputM = output_tensor.at(1).GetTensorMutableData<float>();

    float predRespE = outputE[0];
    float predRespM = outputM[0];
 
    if ( predRespE == 0 || predRespM == 0 ) {
        ATH_MSG_WARNING("Predictions give 0 values, will not apply calibration");

        return StatusCode::SUCCESS;
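    // Apply the predicted responses: the calibrated energy is E / predRespE; the branch below for
    // calibM > 40000 MeV (body omitted in this excerpt) presumably applies the mass response, and the
    // calibrated pT is recomputed from the calibrated E, m and eta.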
 
    float calibE = jetStartP4.e() / predRespE;

    float calibM = jetStartP4.mass();
    if ( calibM > 40000 ) {

    float calibpT = std::sqrt( calibE*calibE - calibM*calibM )/std::cosh( jetStartP4.eta() );
 
    TLorentzVector TLVjet;
    TLVjet.SetPtEtaPhiM( calibpT, jetStartP4.eta(), jetStartP4.phi(), calibM );

    calibP4.SetPxPyPzE( TLVjet.Px(), TLVjet.Py(), TLVjet.Pz(), TLVjet.E() );

    jet.setJetP4( calibP4 );

    return StatusCode::SUCCESS;
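// Build the vector of DNN input values: one entry per configured NN input (m_NNInputs); the loop body
// (omitted in this excerpt) presumably fills each entry with the corresponding retriever's value.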
 
    std::vector<float> input_tensor_values( m_NNInputs.size() );

    for( size_t i = 0; i < input_tensor_values.size(); i++ ){

    return input_tensor_values;