Belle II Software development
GRLNeuro Class Reference

Class to represent the GRL Neuro. More...

#include <GRLNeuro.h>

Classes

struct  Parameters
 Struct to keep neurotrigger parameters. More...
 

Public Member Functions

 GRLNeuro ()
 Default constructor.
 
virtual ~GRLNeuro ()
 Default destructor.
 
void initialize (const Parameters &p)
 Set parameters and get some network independent parameters.
 
GRLMLP & operator[] (unsigned index)
 return reference to a neural network
 
const GRLMLP & operator[] (unsigned index) const
 return const reference to a neural network
 
unsigned nSectors () const
 return number of neural networks
 
void save (const std::string &filename, const std::string &arrayname="MLPs")
 Save MLPs to file.
 
bool load (unsigned isector, const std::string &wfilename, const std::string &bfilename)
 Load MLPs from file.
 
bool load (unsigned isector, std::vector< float > weight, std::vector< float > bias)
 Load MLPs from file.
 
float runMLP (unsigned isector, const std::vector< float > &input)
 Run an expert MLP.
 

Private Attributes

std::vector< GRLMLP > m_MLPs = {}
 List of networks.
 

Detailed Description

Class to represent the GRL Neuro.

The Neurotrigger consists of one or several Multi Layer Perceptrons. The input values are calculated from ECLTRG cluster and a 2D track estimate. The output is a scaled estimate of the judgement.

See also
GRLNeuro Modules:
GRLTrainerModule for preparing training data and training,
GRLNeuro for loading trained networks and using them.

Definition at line 35 of file GRLNeuro.h.

Constructor & Destructor Documentation

◆ GRLNeuro()

GRLNeuro ( )
inline

Default constructor.

Definition at line 71 of file GRLNeuro.h.

71{}

◆ ~GRLNeuro()

virtual ~GRLNeuro ( )
inlinevirtual

Default destructor.

Definition at line 74 of file GRLNeuro.h.

74{}

Member Function Documentation

◆ initialize()

void initialize ( const Parameters & p)

Set parameters and get some network independent parameters.

Definition at line 287 of file GRLNeuro.cc.

288{
289 // check parameters
290 bool okay = true;
291 // ensure that length of lists matches number of sectors
292 if (p.nHidden.size() != 1 && p.nHidden.size() != p.nMLP) {
293 B2ERROR("Number of nHidden lists should be 1 or " << p.nMLP);
294 okay = false;
295 }
296 if (p.outputScale.size() != 1 && p.outputScale.size() != p.nMLP) {
297 B2ERROR("Number of outputScale lists should be 1 or " << p.nMLP);
298 okay = false;
299 }
300 // ensure that number of target nodes is valid
301 unsigned short nTarget = int(p.targetresult);
302 if (nTarget < 1) {
303 B2ERROR("No outputs! Turn on targetresult.");
304 okay = false;
305 }
306 for (unsigned iScale = 0; iScale < p.outputScale.size(); ++iScale) {
307 if (p.outputScale[iScale].size() != 2 * nTarget) {
308 B2ERROR("outputScale should be exactly " << 2 * nTarget << " values");
309 okay = false;
310 }
311 }
312
313 if (!okay) return;
314 // initialize MLPs
315 m_MLPs.clear();
316 for (unsigned iMLP = 0; iMLP < p.nMLP; ++iMLP) {
317 //get indices for sector parameters
318 unsigned short nInput = p.i_cdc_sector[iMLP] + p.i_ecl_sector[iMLP];
319 vector<float> nhidden = p.nHidden[iMLP];
320 vector<unsigned short> nNodes = {nInput};
321 for (unsigned iHid = 0; iHid < nhidden.size(); ++iHid) {
322 if (p.multiplyHidden) {
323 nNodes.push_back(nhidden[iHid] * nNodes[0]);
324 } else {
325 nNodes.push_back(nhidden[iHid]);
326 }
327 }
328 nNodes.push_back(nTarget);
329 unsigned short targetVars = int(p.targetresult);
330 vector<float> outputScale = (p.outputScale.size() == 1) ? p.outputScale[0] : p.outputScale[iMLP];
331 m_MLPs.push_back(GRLMLP(nNodes, targetVars, outputScale));
332 }
333}
std::vector< GRLMLP > m_MLPs
List of networks.
Definition GRLNeuro.h:114

◆ load() [1/2]

bool load ( unsigned isector,
const std::string & wfilename,
const std::string & bfilename )

Load MLPs from file.

Parameters
isector	index of the MLP
wfilename	name of the file holding the weight values
bfilename	name of the file holding the bias values
Returns
true if the MLPs were loaded correctly

Definition at line 530 of file GRLNeuro.cc.

531{
532 if (weightfilename.size() < 1) {
533 B2ERROR("Could not load Neurotrigger weights from database!");
534 return false;
535 } else if (biasfilename.size() < 1) {
536 B2ERROR("Could not load Neurotrigger bias from database!");
537 return false;
538 } else {
539 std::ifstream wfile(weightfilename);
540 if (!wfile.is_open()) {
541 B2WARNING("Could not open file " << weightfilename);
542 return false;
543 } else {
544 std::ifstream bfile(biasfilename);
545 if (!bfile.is_open()) {
546 B2WARNING("Could not open file " << biasfilename);
547 return false;
548 } else {
549 GRLMLP& expert = m_MLPs[isector];
550 std::vector<float> warray;
551 std::vector<float> barray;
552 warray.clear();
553 barray.clear();
554
555 float element;
556 while (wfile >> element) {
557 warray.push_back(element);
558 }
559 while (bfile >> element) {
560 barray.push_back(element);
561 }
562
563 if (warray.size() != expert.nWeightsCal()) {
564 B2ERROR("Number of weights is not equal to registered architecture!");
565 return false;
566 } else expert.setWeights(warray);
567 if (barray.size() != expert.nBiasCal()) {
568 B2ERROR("Number of bias is not equal to registered architecture!");
569 return false;
570 }
571
572 expert.setWeights(warray);
573 expert.setBias(barray);
574 return true;
575 }
576 }
577 }
578}

◆ load() [2/2]

bool load ( unsigned isector,
std::vector< float > weight,
std::vector< float > bias )

Load MLPs from file.

Parameters
isector	index of the MLP
weight	weight array from database
bias	bias array from database
Returns
true if the MLPs were loaded correctly

Definition at line 582 of file GRLNeuro.cc.

583{
584 GRLMLP& expert = m_MLPs[isector];
585 expert.setWeights(warray);
586 expert.setBias(barray);
587 return true;
588}

◆ nSectors()

unsigned nSectors ( ) const
inline

return number of neural networks

Definition at line 84 of file GRLNeuro.h.

84{ return m_MLPs.size(); }

◆ operator[]() [1/2]

GRLMLP & operator[] ( unsigned index)
inline

return reference to a neural network

Definition at line 80 of file GRLNeuro.h.

80{ return m_MLPs[index]; }

◆ operator[]() [2/2]

const GRLMLP & operator[] ( unsigned index) const
inline

return const reference to a neural network

Definition at line 82 of file GRLNeuro.h.

82{ return m_MLPs[index]; }

◆ runMLP()

float runMLP ( unsigned isector,
const std::vector< float > & input )

Run an expert MLP.

Parameters
isector	index of the MLP
input	vector of input values
Returns
output values (classifier)

Definition at line 389 of file GRLNeuro.cc.

390{
391
392
393 const GRLMLP& expert = m_MLPs[isector];
394 vector<float> weights = expert.getWeights();
395 vector<float> bias = expert.getBias();
396 vector<float> layerinput = input;
397
398 vector<float> layeroutput2 = {};
399 vector<float> layeroutput3 = {};
400 vector<float> layeroutput4 = {};
401
403 for (size_t i = 0; i < layerinput.size(); ++i) {
404 layerinput[i] = sim_fix_input_layer_t(layerinput[i]);
405 }
406 layeroutput2.clear();
407 layeroutput2.assign(expert.getNumberOfNodesLayer(2), 0.);
408
409 unsigned num_inputs = layerinput.size();
410 unsigned num_neurons = expert.getNumberOfNodesLayer(2); // 64
411 for (unsigned io = 0; io < num_neurons; ++io) {
412 float bias_raw = bias[io];
413 float bias_fixed = sim_fix_dense_0_bias_t(bias_raw);
414 float bias_contrib = sim_fix_dense_0_accum_t(bias_fixed);
415 layeroutput2[io] = bias_contrib;
416 }
417
418 unsigned iw = 0;
419// input*weight
420 for (unsigned ii = 0; ii < num_inputs; ++ii) {
421 float input_val = layerinput[ii];
422 for (unsigned io = 0; io < num_neurons; ++io) {
423 float weight_raw = weights[iw];
424 float weight_fixed = sim_fix_dense_0_weight_t(weight_raw);
425 float product = input_val * weight_fixed;
426 float contrib = sim_fix_dense_0_accum_t(product);
427
428 layeroutput2[io] += contrib;
429
430 ++iw;
431 }
432 }
433
434
435//apply activation function, ReLU for hidden layer and output layer
436// === dense_0_t + ReLU ===
437 std::vector<float> layeroutput2_fixed_relu(num_neurons);
438
439 for (unsigned io = 0; io < num_neurons; ++io) {
440 // dense_0_t)
441 float fixed_val = sim_fix_dense_0_t(layeroutput2[io]);
442
443 // ReLU
444 float relu_val = (fixed_val > 0) ? fixed_val : 0;
445
446 layeroutput2_fixed_relu[io] = relu_val;
447
448 }
449
450 std::vector<float> dense1_input(64);
451 for (unsigned i = 0; i < 64; ++i) {
452 dense1_input[i] = sim_dense_1_input_quant(i, layeroutput2_fixed_relu[i]);
453 }
454
455 layeroutput3.clear();
456 layeroutput3.assign(expert.getNumberOfNodesLayer(1), 0.);
457 unsigned num_inputs_1 = layeroutput2_fixed_relu.size();
458 unsigned num_neurons_1 = expert.getNumberOfNodesLayer(2);
459 for (unsigned io = 64; io < num_neurons_1 + 64; ++io) {
460 float bias_raw = bias[io];
461 float bias_fixed = sim_fix_dense_1_bias_t(bias_raw);
462 float bias_contrib = sim_fix_dense_1_accum_t(bias_fixed);
463 layeroutput3[io - 64] = bias_contrib;
464
465 }
466
467
468 for (unsigned ii = 0; ii < num_inputs_1; ++ii) {
469 float input_val = dense1_input[ii];
470 for (unsigned io = 0; io < num_neurons_1; ++io) {
471
472 float weight_raw = weights[iw];
473
474 float weight_fixed = sim_fix_dense_1_weight_t(weight_raw);
475 float product = input_val * weight_fixed;
476 float contrib = sim_fix_dense_1_accum_t(product);
477
478 layeroutput3[io] += contrib;
479 ++iw;
480 }
481 }
482
483
484 std::vector<float> layeroutput3_fixed_relu(num_neurons);
485
486
487 for (unsigned io = 0; io < num_neurons_1; ++io) {
488 float fixed_val = sim_fix_dense_1_t(layeroutput3[io]);
489 // ReLU
490 float relu_val = (fixed_val > 0) ? fixed_val : 0;
491
492 layeroutput3_fixed_relu[io] = relu_val;
493
494 }
495 std::vector<float> dense2_input(64);
496 for (unsigned i = 0; i < 64; ++i) {
497 dense2_input[i] = sim_dense_2_input_quant(i, layeroutput3_fixed_relu[i]);
498 }
499 layeroutput4.clear();
500 layeroutput4.assign(expert.getNumberOfNodesLayer(3), 0.);
501
502 unsigned num_inputs_2 = layeroutput2_fixed_relu.size();
503 unsigned num_neurons_2 = expert.getNumberOfNodesLayer(3);
504 for (unsigned io = 128; io < num_neurons_2 + 128; ++io) {
505 float bias_raw = bias[io];
506 float bias_fixed = sim_fix_dense_2_bias_t(bias_raw);
507 float bias_contrib = sim_fix_dense_2_accum_t(bias_fixed);
508 layeroutput4[io - 128] = bias_contrib;
509
510 }
511
512 for (unsigned ii = 0; ii < num_inputs_2; ++ii) {
513 float input_val = dense2_input[ii];
514 for (unsigned io = 0; io < num_neurons_2; ++io) {
515 float weight_raw = weights[iw];
516 float weight_fixed = sim_fix_dense_2_weight_t(weight_raw);
517 float product = input_val * weight_fixed;
518 float contrib = sim_fix_dense_2_accum_t(product);
519
520 layeroutput4[io] += contrib;
521
522 ++iw;
523 }
524 }
525 return layeroutput4[0];
526
527}

◆ save()

void save ( const std::string & filename,
const std::string & arrayname = "MLPs" )

Save MLPs to file.

Parameters
filename	name of the TFile to write to
arrayname	name of the TObjArray holding the MLPs in the file

Member Data Documentation

◆ m_MLPs

std::vector<GRLMLP> m_MLPs = {}
private

List of networks.

Definition at line 114 of file GRLNeuro.h.

114{};

The documentation for this class was generated from the following files: