Belle II Software light-2509-fornax
ONNXExpert Class Reference

Expert for the ONNX MVA method. More...

#include <ONNX.h>

Inheritance diagram for ONNXExpert:
Collaboration diagram for ONNXExpert:

Public Member Functions

virtual void load (Weightfile &weightfile) override
 Load the expert from a Weightfile.
 
virtual std::vector< float > apply (Dataset &testData) const override
 Apply this expert onto a dataset.
 
virtual std::vector< std::vector< float > > applyMulticlass (Dataset &test_data) const override
 Apply this expert onto a dataset and return multiple outputs.
 

Protected Attributes

GeneralOptions m_general_options
 General options loaded from the weightfile.
 

Private Member Functions

void configureInputOutputNames ()
 Set up input and output names and perform consistency checks.
 
void configureOutputValueIndex ()
 Configure index of the value to be used for the configured output tensor.
 

Private Attributes

std::unique_ptr< ONNX::Session > m_session
 The ONNX inference session wrapper.
 
ONNXOptions m_specific_options
 ONNX specific options loaded from weightfile.
 
std::string m_inputName
 Name of the input tensor (will be determined automatically)
 
std::string m_outputName
 Name of the output tensor (will either be determined automatically or loaded from specific options)
 
int m_outputValueIndex
 Index of the output value to pick in non-multiclass mode.
 

Detailed Description

Expert for the ONNX MVA method.

Definition at line 446 of file ONNX.h.

Member Function Documentation

◆ apply()

std::vector< float > apply ( Dataset & testData) const
overridevirtual

Apply this expert onto a dataset.

Parameters
testData dataset

Implements Expert.

Definition at line 168 of file ONNX.cc.

169{
170 const auto nFeatures = testData.getNumberOfFeatures();
171 const auto nEvents = testData.getNumberOfEvents();
172 const int nOutputs = (m_outputValueIndex == 1) ? 2 : 1;
173 auto input = Tensor<float>::make_shared({1, nFeatures});
174 auto output = Tensor<float>::make_shared({1, nOutputs});
175 std::vector<float> result;
176 result.reserve(nEvents);
177 for (unsigned int iEvent = 0; iEvent < nEvents; ++iEvent) {
178 testData.loadEvent(iEvent);
179 input->setValues(testData.m_input);
180 m_session->run({{m_inputName, input}}, {{m_outputName, output}});
181 result.push_back(output->at(m_outputValueIndex));
182 }
183 return result;
184}
virtual unsigned int getNumberOfEvents() const =0
Returns the number of events in this dataset.
virtual unsigned int getNumberOfFeatures() const =0
Returns the number of features in this dataset.
virtual void loadEvent(unsigned int iEvent)=0
Load the event number iEvent.
std::vector< float > m_input
Contains all feature values of the currently loaded event.
Definition Dataset.h:123
std::unique_ptr< ONNX::Session > m_session
The ONNX inference session wrapper.
Definition ONNX.h:484
std::string m_outputName
Name of the output tensor (will either be determined automatically or loaded from specific options)
Definition ONNX.h:500
std::string m_inputName
Name of the input tensor (will be determined automatically)
Definition ONNX.h:494
int m_outputValueIndex
Index of the output value to pick in non-multiclass mode.
Definition ONNX.h:505
static auto make_shared(std::vector< int64_t > shape)
Convenience method to create a shared pointer to a Tensor from shape.
Definition ONNX.h:145

◆ applyMulticlass()

std::vector< std::vector< float > > applyMulticlass ( Dataset & test_data) const
overridevirtual

Apply this expert onto a dataset and return multiple outputs.

Parameters
test_data dataset

Reimplemented from Expert.

Definition at line 186 of file ONNX.cc.

187{
188 const unsigned int nClasses = m_general_options.m_nClasses;
189 const auto nFeatures = testData.getNumberOfFeatures();
190 const auto nEvents = testData.getNumberOfEvents();
191 auto input = Tensor<float>::make_shared({1, nFeatures});
192 auto output = Tensor<float>::make_shared({1, nClasses});
193 std::vector<std::vector<float>> result(nEvents, std::vector<float>(nClasses));
194 for (unsigned int iEvent = 0; iEvent < nEvents; ++iEvent) {
195 testData.loadEvent(iEvent);
196 input->setValues(testData.m_input);
197 m_session->run({{m_inputName, input}}, {{m_outputName, output}});
198 for (unsigned int iClass = 0; iClass < nClasses; ++iClass) {
199 result[iEvent][iClass] = output->at(iClass);
200 }
201 }
202 return result;
203}
GeneralOptions m_general_options
General options loaded from the weightfile.
Definition Expert.h:70
unsigned int m_nClasses
Number of classes in a classification problem.
Definition Options.h:89

◆ configureInputOutputNames()

void configureInputOutputNames ( )
private

Set up input and output names and perform consistency checks.

Definition at line 89 of file ONNX.cc.

90{
91 const auto& inputNames = m_session->getOrtSession().GetInputNames();
92 const auto& outputNames = m_session->getOrtSession().GetOutputNames();
93
94 // Check if we have a single input model and set the input name to that
95 if (inputNames.size() != 1) {
96 std::stringstream msg;
97 msg << "Model has multiple inputs: ";
98 for (auto name : inputNames)
99 msg << "\"" << name << "\" ";
100 msg << "- only single-input models are supported.";
101 B2FATAL(msg.str());
102 }
103 m_inputName = inputNames[0];
104
105 m_outputName = m_specific_options.m_outputName;
106
107 // For single-output models we just take the name of that single output
108 if (outputNames.size() == 1) {
109 if (!m_outputName.empty() && m_outputName != outputNames[0]) {
110 B2INFO("Output name of the model is "
111 << outputNames[0]
112 << " - will use that despite the configured name being \""
113 << m_outputName << "\"");
114 }
115 m_outputName = outputNames[0];
116 return;
117 }
118
119 // Otherwise we have a multiple-output model and need to check if the
120 // configured output name, or the fallback value "output", exists
121 if (m_outputName.empty()) {
122 m_outputName = "output";
123 }
124 auto outputFound = std::find(outputNames.begin(), outputNames.end(),
125 m_outputName) != outputNames.end();
126 if (!outputFound) {
127 std::stringstream msg;
128 msg << "No output named \"" << m_outputName << "\" found. Instead got ";
129 for (auto name : outputNames)
130 msg << "\"" << name << "\" ";
131 msg << "- either change your model to contain one named \"" << m_outputName
132 << "\" or set `m_outputName` in the specific options to one of the available names.";
133 B2FATAL(msg.str());
134 }
135}
ONNXOptions m_specific_options
ONNX specific options loaded from weightfile.
Definition ONNX.h:489

◆ configureOutputValueIndex()

void configureOutputValueIndex ( )
private

Configure index of the value to be used for the configured output tensor.

Will be 0 in case of a single element (binary classifier with single output or regression) and 1 in case of 2 elements (binary classifier with 2 outputs). For more than 2 elements one has to call applyMulticlass.

Definition at line 137 of file ONNX.cc.

138{
139 int tensorIndex = 0;
140 for (auto name : m_session->getOrtSession().GetOutputNames()) {
141 if (name == m_outputName)
142 break;
143 ++tensorIndex;
144 }
145 auto typeInfo = m_session->getOrtSession().GetOutputTypeInfo(tensorIndex);
146 auto shape = typeInfo.GetTensorTypeAndShapeInfo().GetShape();
 147 if (shape.back() == 2) {
 148 // We have 2 output values
 149 // -> configure to use signal_class index (default 1) in non-multiclass mode
 150 m_outputValueIndex = m_general_options.m_signal_class;
 151 } else {
 152 // otherwise use the default of 0
 153 m_outputValueIndex = 0;
 154 }
155}
int m_signal_class
Signal class which is used as signal in a classification problem.
Definition Options.h:88

◆ load()

void load ( Weightfile & weightfile)
overridevirtual

Load the expert from a Weightfile.

Parameters
weightfile containing all information necessary to build the expert

Implements Expert.

Definition at line 157 of file ONNX.cc.

158{
159 std::string onnxModelFileName = weightfile.generateFileName();
160 weightfile.getFile("ONNX_Modelfile", onnxModelFileName);
161 weightfile.getOptions(m_general_options);
162 weightfile.getOptions(m_specific_options);
 163 m_session = std::make_unique<Session>(onnxModelFileName.c_str());
 164 configureInputOutputNames();
 165 configureOutputValueIndex();
 166}
void configureInputOutputNames()
Set up input and output names and perform consistency checks.
Definition ONNX.cc:89
void configureOutputValueIndex()
Configure index of the value to be used for the configured output tensor.
Definition ONNX.cc:137

Member Data Documentation

◆ m_general_options

GeneralOptions m_general_options
protectedinherited

General options loaded from the weightfile.

Definition at line 70 of file Expert.h.

◆ m_inputName

std::string m_inputName
private

Name of the input tensor (will be determined automatically)

Definition at line 494 of file ONNX.h.

◆ m_outputName

std::string m_outputName
private

Name of the output tensor (will either be determined automatically or loaded from specific options)

Definition at line 500 of file ONNX.h.

◆ m_outputValueIndex

int m_outputValueIndex
private

Index of the output value to pick in non-multiclass mode.

Definition at line 505 of file ONNX.h.

◆ m_session

std::unique_ptr<ONNX::Session> m_session
private

The ONNX inference session wrapper.

Definition at line 484 of file ONNX.h.

◆ m_specific_options

ONNXOptions m_specific_options
private

ONNX specific options loaded from weightfile.

Definition at line 489 of file ONNX.h.


The documentation for this class was generated from the following files: