Belle II Software development
GRLNeuro Class Reference

Class to represent the GRL Neuro. More...

#include <GRLNeuro.h>

Classes

struct  Parameters
 Struct to keep neurotrigger parameters. More...
 

Public Member Functions

 GRLNeuro ()
 Default constructor.
 
virtual ~GRLNeuro ()
 Default destructor.
 
void initialize (const Parameters &p)
 Set parameters and get some network independent parameters.
 
GRLMLP & operator[] (unsigned index)
 return reference to a neural network
 
const GRLMLP & operator[] (unsigned index) const
 return const reference to a neural network
 
unsigned nSectors () const
 return number of neural networks
 
void save (const std::string &filename, const std::string &arrayname="MLPs")
 Save MLPs to file.
 
bool load (unsigned isector, const std::string &wfilename, const std::string &bfilename)
 Load MLPs from file.
 
bool load (unsigned isector, std::vector< float > weight, std::vector< float > bias)
 Load MLPs from file.
 
float runMLP (unsigned isector, const std::vector< float > &input)
 Run an expert MLP.
 

Private Attributes

std::vector< GRLMLP > m_MLPs = {}
 List of networks.
 

Detailed Description

Class to represent the GRL Neuro.

The Neurotrigger consists of one or several Multi Layer Perceptrons. The input values are calculated from ECLTRG cluster and a 2D track estimate. The output is a scaled estimate of the judgement.

See also
GRLNeuro Modules:
GRLTrainerModule for preparing training data and training,
GRLNeuro for loading trained networks and using them.

Definition at line 35 of file GRLNeuro.h.

Constructor & Destructor Documentation

◆ GRLNeuro()

GRLNeuro ( )
inline

Default constructor.

Definition at line 71 of file GRLNeuro.h.

71{}

◆ ~GRLNeuro()

virtual ~GRLNeuro ( )
inlinevirtual

Default destructor.

Definition at line 74 of file GRLNeuro.h.

74{}

Member Function Documentation

◆ initialize()

void initialize ( const Parameters & p)

Set parameters and get some network independent parameters.

Definition at line 284 of file GRLNeuro.cc.

285{
286 // check parameters
287 bool okay = true;
288 // ensure that length of lists matches number of sectors
289 if (p.nHidden.size() != 1 && p.nHidden.size() != p.nMLP) {
290 B2ERROR("Number of nHidden lists should be 1 or " << p.nMLP);
291 okay = false;
292 }
293 if (p.outputScale.size() != 1 && p.outputScale.size() != p.nMLP) {
294 B2ERROR("Number of outputScale lists should be 1 or " << p.nMLP);
295 okay = false;
296 }
297 // ensure that number of target nodes is valid
298 unsigned short nTarget = int(p.targetresult);
299 if (nTarget < 1) {
300 B2ERROR("No outputs! Turn on targetresult.");
301 okay = false;
302 }
303 for (unsigned iScale = 0; iScale < p.outputScale.size(); ++iScale) {
304 if (p.outputScale[iScale].size() != 2 * nTarget) {
305 B2ERROR("outputScale should be exactly " << 2 * nTarget << " values");
306 okay = false;
307 }
308 }
309
310 if (!okay) return;
311 // initialize MLPs
312 m_MLPs.clear();
313 for (unsigned iMLP = 0; iMLP < p.nMLP; ++iMLP) {
314 //get indices for sector parameters
315 unsigned short nInput = p.i_cdc_sector[iMLP] + p.i_ecl_sector[iMLP];
316 vector<float> nhidden = p.nHidden[iMLP];
317 vector<unsigned short> nNodes = {nInput};
318 for (unsigned iHid = 0; iHid < nhidden.size(); ++iHid) {
319 if (p.multiplyHidden) {
320 nNodes.push_back(nhidden[iHid] * nNodes[0]);
321 } else {
322 nNodes.push_back(nhidden[iHid]);
323 }
324 }
325 nNodes.push_back(nTarget);
326 unsigned short targetVars = int(p.targetresult);
327 vector<float> outputScale = (p.outputScale.size() == 1) ? p.outputScale[0] : p.outputScale[iMLP];
328 m_MLPs.push_back(GRLMLP(nNodes, targetVars, outputScale));
329 }
330}
std::vector< GRLMLP > m_MLPs
List of networks.
Definition GRLNeuro.h:114

◆ load() [1/2]

bool load ( unsigned isector,
const std::string & wfilename,
const std::string & bfilename )

Load MLPs from file.

Parameters
isector	index of the MLP
wfilename	name of the file containing the weight values
bfilename	name of the file containing the bias values
Returns
true if the MLPs were loaded correctly

Definition at line 528 of file GRLNeuro.cc.

529{
530 if (weightfilename.size() < 1) {
531 B2ERROR("Could not load Neurotrigger weights from database!");
532 return false;
533 } else if (biasfilename.size() < 1) {
534 B2ERROR("Could not load Neurotrigger bias from database!");
535 return false;
536 } else {
537 std::ifstream wfile(weightfilename);
538 if (!wfile.is_open()) {
539 B2WARNING("Could not open file " << weightfilename);
540 return false;
541 } else {
542 std::ifstream bfile(biasfilename);
543 if (!bfile.is_open()) {
544 B2WARNING("Could not open file " << biasfilename);
545 return false;
546 } else {
547 GRLMLP& expert = m_MLPs[isector];
548 std::vector<float> warray;
549 std::vector<float> barray;
550 warray.clear();
551 barray.clear();
552
553 float element;
554 while (wfile >> element) {
555 warray.push_back(element);
556 }
557 while (bfile >> element) {
558 barray.push_back(element);
559 }
560
561 if (warray.size() != expert.nWeightsCal()) {
562 B2ERROR("Number of weights is not equal to registered architecture!");
563 return false;
564 } else expert.setWeights(warray);
565 if (barray.size() != expert.nBiasCal()) {
566 B2ERROR("Number of bias is not equal to registered architecture!");
567 return false;
568 }
569
570 expert.setWeights(warray);
571 expert.setBias(barray);
572 return true;
573 }
574 }
575 }
576}

◆ load() [2/2]

bool load ( unsigned isector,
std::vector< float > weight,
std::vector< float > bias )

Load MLPs from file.

Parameters
isector	index of the MLP
weight	weight array from database
bias	bias array from database
Returns
true if the MLPs were loaded correctly

Definition at line 578 of file GRLNeuro.cc.

579{
580 GRLMLP& expert = m_MLPs[isector];
581 expert.setWeights(warray);
582 expert.setBias(barray);
583 return true;
584}

◆ nSectors()

unsigned nSectors ( ) const
inline

return number of neural networks

Definition at line 84 of file GRLNeuro.h.

84{ return m_MLPs.size(); }

◆ operator[]() [1/2]

GRLMLP & operator[] ( unsigned index)
inline

return reference to a neural network

Definition at line 80 of file GRLNeuro.h.

80{ return m_MLPs[index]; }

◆ operator[]() [2/2]

const GRLMLP & operator[] ( unsigned index) const
inline

return const reference to a neural network

Definition at line 82 of file GRLNeuro.h.

82{ return m_MLPs[index]; }

◆ runMLP()

float runMLP ( unsigned isector,
const std::vector< float > & input )

Run an expert MLP.

Parameters
isector	index of the MLP
input	vector of input values
Returns
output values (classifier)

Definition at line 387 of file GRLNeuro.cc.

388{
389
390
391 const GRLMLP& expert = m_MLPs[isector];
392 vector<float> weights = expert.getWeights();
393 vector<float> bias = expert.getBias();
394 vector<float> layerinput = input;
395
396 vector<float> layeroutput2 = {};
397 vector<float> layeroutput3 = {};
398 vector<float> layeroutput4 = {};
399
401 for (size_t i = 0; i < layerinput.size(); ++i) {
402 layerinput[i] = sim_fix_input_layer_t(layerinput[i]);
403 }
404 layeroutput2.clear();
405 layeroutput2.assign(expert.getNumberOfNodesLayer(2), 0.);
406
407 unsigned num_inputs = layerinput.size();
408 unsigned num_neurons = expert.getNumberOfNodesLayer(2); // 64
409 for (unsigned io = 0; io < num_neurons; ++io) {
410 float bias_raw = bias[io];
411 float bias_fixed = sim_fix_dense_0_bias_t(bias_raw);
412 float bias_contrib = sim_fix_dense_0_accum_t(bias_fixed);
413 layeroutput2[io] = bias_contrib;
414 }
415
416 unsigned iw = 0;
417// input*weight
418 for (unsigned ii = 0; ii < num_inputs; ++ii) {
419 float input_val = layerinput[ii];
420 for (unsigned io = 0; io < num_neurons; ++io) {
421 float weight_raw = weights[iw];
422 float weight_fixed = sim_fix_dense_0_weight_t(weight_raw);
423 float product = input_val * weight_fixed;
424 float contrib = sim_fix_dense_0_accum_t(product);
425
426 layeroutput2[io] += contrib;
427
428 ++iw;
429 }
430 }
431
432
433//apply activation function, ReLU for hidden layer and output layer
434// === dense_0_t + ReLU ===
435 std::vector<float> layeroutput2_fixed_relu(num_neurons);
436
437 for (unsigned io = 0; io < num_neurons; ++io) {
438 // dense_0_t)
439 float fixed_val = sim_fix_dense_0_t(layeroutput2[io]);
440
441 // ReLU
442 float relu_val = (fixed_val > 0) ? fixed_val : 0;
443
444 layeroutput2_fixed_relu[io] = relu_val;
445
446 }
447
448 std::vector<float> dense1_input(64);
449 for (unsigned i = 0; i < 64; ++i) {
450 dense1_input[i] = sim_dense_1_input_quant(i, layeroutput2_fixed_relu[i]);
451 }
452
453 layeroutput3.clear();
454 layeroutput3.assign(expert.getNumberOfNodesLayer(1), 0.);
455 unsigned num_inputs_1 = layeroutput2_fixed_relu.size();
456 unsigned num_neurons_1 = expert.getNumberOfNodesLayer(2);
457 for (unsigned io = 64; io < num_neurons_1 + 64; ++io) {
458 float bias_raw = bias[io];
459 float bias_fixed = sim_fix_dense_1_bias_t(bias_raw);
460 float bias_contrib = sim_fix_dense_1_accum_t(bias_fixed);
461 layeroutput3[io - 64] = bias_contrib;
462
463 }
464
465
466 for (unsigned ii = 0; ii < num_inputs_1; ++ii) {
467 float input_val = dense1_input[ii];
468 for (unsigned io = 0; io < num_neurons_1; ++io) {
469
470 float weight_raw = weights[iw];
471
472 float weight_fixed = sim_fix_dense_1_weight_t(weight_raw);
473 float product = input_val * weight_fixed;
474 float contrib = sim_fix_dense_1_accum_t(product);
475
476 layeroutput3[io] += contrib;
477 ++iw;
478 }
479 }
480
481
482 std::vector<float> layeroutput3_fixed_relu(num_neurons);
483
484
485 for (unsigned io = 0; io < num_neurons_1; ++io) {
486 float fixed_val = sim_fix_dense_1_t(layeroutput3[io]);
487 // ReLU
488 float relu_val = (fixed_val > 0) ? fixed_val : 0;
489
490 layeroutput3_fixed_relu[io] = relu_val;
491
492 }
493 std::vector<float> dense2_input(64);
494 for (unsigned i = 0; i < 64; ++i) {
495 dense2_input[i] = sim_dense_2_input_quant(i, layeroutput3_fixed_relu[i]);
496 }
497 layeroutput4.clear();
498 layeroutput4.assign(expert.getNumberOfNodesLayer(3), 0.);
499
500 unsigned num_inputs_2 = layeroutput2_fixed_relu.size();
501 unsigned num_neurons_2 = expert.getNumberOfNodesLayer(3);
502 for (unsigned io = 128; io < num_neurons_2 + 128; ++io) {
503 float bias_raw = bias[io];
504 float bias_fixed = sim_fix_dense_2_bias_t(bias_raw);
505 float bias_contrib = sim_fix_dense_2_accum_t(bias_fixed);
506 layeroutput4[io - 128] = bias_contrib;
507
508 }
509
510 for (unsigned ii = 0; ii < num_inputs_2; ++ii) {
511 float input_val = dense2_input[ii];
512 for (unsigned io = 0; io < num_neurons_2; ++io) {
513 float weight_raw = weights[iw];
514 float weight_fixed = sim_fix_dense_2_weight_t(weight_raw);
515 float product = input_val * weight_fixed;
516 float contrib = sim_fix_dense_2_accum_t(product);
517
518 layeroutput4[io] += contrib;
519
520 ++iw;
521 }
522 }
523 return layeroutput4[0];
524
525}

◆ save()

void save ( const std::string & filename,
const std::string & arrayname = "MLPs" )

Save MLPs to file.

Parameters
filenamename of the TFile to write to
arraynamename of the TObjArray holding the MLPs in the file

Member Data Documentation

◆ m_MLPs

std::vector<GRLMLP> m_MLPs = {}
private

List of networks.

Definition at line 114 of file GRLNeuro.h.

114{};

The documentation for this class was generated from the following files: