Belle II Software prerelease-11-00-00a
GRLNeuro Class Reference

Class to represent the GRL Neuro.

#include <GRLNeuro.h>

[Collaboration diagram for GRLNeuro]

Classes

struct  Parameters
 Struct to keep neurotrigger parameters.
 

Public Member Functions

 GRLNeuro ()
 Default constructor.
 
virtual ~GRLNeuro ()
 Default destructor.
 
void initialize (const Parameters &p)
 Set parameters and get some network-independent parameters.
 
GRLMLP & operator[] (unsigned index)
 return reference to a neural network
 
const GRLMLP & operator[] (unsigned index) const
 return const reference to a neural network
 
unsigned nSectors () const
 return number of neural networks
 
void save (const std::string &filename, const std::string &arrayname="MLPs")
 Save MLPs to file.
 
bool load (unsigned isector, const std::string &weightfilename, const std::string &biasfilename)
 Load MLPs from file.
 
bool load (unsigned isector, std::vector< float > warray, std::vector< float > barray)
 Load MLPs directly from vectors of weights and biases.
 
std::vector< float > runMLP (unsigned isector, const std::vector< float > &input)
 Run an expert MLP.
 

Private Attributes

std::vector< GRLMLP > m_MLPs = {}
 List of networks.
 

Detailed Description

Class to represent the GRL Neuro.

The Neurotrigger consists of one or several multi-layer perceptrons (MLPs). The input values are calculated from ECLTRG clusters and a 2D track estimate. The output is a scaled estimate of the classification judgement.

See also
GRLNeuro Modules:
GRLTrainerModule for preparing training data and training,
GRLNeuro for loading trained networks and using them.
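
A minimal usage sketch, assuming the call sequence suggested by the member functions below (parameter values and file names are hypothetical, not Belle II defaults):

#include <GRLNeuro.h>
#include <vector>

std::vector<float> classifyEvent(const std::vector<float>& input)
{
  GRLNeuro neuro;

  GRLNeuro::Parameters p;
  p.nMLP = 1;        // one network (sector)
  // ... fill the remaining per-sector parameter lists (size 1 or nMLP) ...
  neuro.initialize(p);

  // Load trained weights and biases for sector 0 from plain-text files.
  if (!neuro.load(0, "weights.dat", "bias.dat")) {
    return {};       // loading failed
  }

  // Run the expert MLP; the result is the scaled classifier output.
  return neuro.runMLP(0, input);
}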

Definition at line 34 of file GRLNeuro.h.

Constructor & Destructor Documentation

◆ GRLNeuro()

GRLNeuro ( )
inline

Default constructor.

Definition at line 100 of file GRLNeuro.h.

100{}

◆ ~GRLNeuro()

virtual ~GRLNeuro ( )
inlinevirtual

Default destructor.

Definition at line 103 of file GRLNeuro.h.

103{}

Member Function Documentation

◆ initialize()

void initialize ( const Parameters & p)

Set parameters and get some network-independent parameters.

Definition at line 248 of file GRLNeuro.cc.

249{
250 using std::vector;
251
252 B2DEBUG(10, "GRLNeuro::initialize: nMLP=" << p.nMLP);
253
254 // ------------------------------------------------------------------
255 // Basic parameter validation (fatal in initialize)
256 // ------------------------------------------------------------------
257
258 if (p.nHidden.size() != 1 && p.nHidden.size() != p.nMLP) {
259 B2FATAL("Number of nHidden lists should be 1 or " << p.nMLP);
260 }
261
262 // ------------------------------------------------------------------
263 // Comprehensive parameter-size validation
264 // ------------------------------------------------------------------
265
266 auto check_size = [&](const auto & v, const char* name) {
267 const size_t s = v.size();
268 if (s != 1 && s != static_cast<size_t>(p.nMLP)) {
269 B2FATAL(std::string(name) + " size (" + std::to_string(s)
270 + ") != 1 and != nMLP (" + std::to_string(p.nMLP) + ")");
271 }
272 };
273
274 // sector / structure parameters
275 check_size(p.i_cdc_sector, "i_cdc_sector");
276 check_size(p.i_ecl_sector, "i_ecl_sector");
277 check_size(p.nHidden, "nHidden");
278 check_size(p.nn_thres, "nn_thres");
279
280 // bias
281 check_size(p.total_bit_bias, "total_bit_bias");
282 check_size(p.int_bit_bias, "int_bit_bias");
283 check_size(p.is_signed_bias, "is_signed_bias");
284 check_size(p.rounding_bias, "rounding_bias");
285 check_size(p.saturation_bias, "saturation_bias");
286
287 // accumulator
288 check_size(p.total_bit_accum, "total_bit_accum");
289 check_size(p.int_bit_accum, "int_bit_accum");
290 check_size(p.is_signed_accum, "is_signed_accum");
291 check_size(p.rounding_accum, "rounding_accum");
292 check_size(p.saturation_accum, "saturation_accum");
293
294 // weights
295 check_size(p.total_bit_weight, "total_bit_weight");
296 check_size(p.int_bit_weight, "int_bit_weight");
297 check_size(p.is_signed_weight, "is_signed_weight");
298 check_size(p.rounding_weight, "rounding_weight");
299 check_size(p.saturation_weight, "saturation_weight");
300
301 // relu
302 check_size(p.total_bit_relu, "total_bit_relu");
303 check_size(p.int_bit_relu, "int_bit_relu");
304 check_size(p.is_signed_relu, "is_signed_relu");
305 check_size(p.rounding_relu, "rounding_relu");
306 check_size(p.saturation_relu, "saturation_relu");
307
308 // generic
309 check_size(p.total_bit, "total_bit");
310 check_size(p.int_bit, "int_bit");
311 check_size(p.is_signed, "is_signed");
312 check_size(p.rounding, "rounding");
313 check_size(p.saturation, "saturation");
314
315 // inputs
316 check_size(p.W_input, "W_input");
317 check_size(p.I_input, "I_input");
318
319 // ------------------------------------------------------------------
320 // Initialize MLPs
321 // ------------------------------------------------------------------
322
323 m_MLPs.clear();
324 m_MLPs.reserve(p.nMLP);
325
326 for (unsigned iMLP = 0; iMLP < p.nMLP; ++iMLP) {
327
328 // helper: broadcast if size == 1
329 auto pick = [&](const auto & container) -> const auto& {
330 return (container.size() == 1) ? container[0] : container[iMLP];
331 };
332
333 unsigned short i_cdc = static_cast<unsigned short>(pick(p.i_cdc_sector));
334 unsigned short i_ecl = static_cast<unsigned short>(pick(p.i_ecl_sector));
335 unsigned short nInput = static_cast<unsigned short>(i_cdc + i_ecl);
336
337 // nHidden: vector<vector<float>>
338 const vector<float>& nhidden = pick(p.nHidden);
339 vector<unsigned short> nNodes = { nInput };
340
341 for (unsigned iHid = 0; iHid < nhidden.size(); ++iHid) {
342 if (p.multiplyHidden) {
343 nNodes.push_back(static_cast<unsigned short>(nhidden[iHid] * nNodes[0]));
344 } else {
345 nNodes.push_back(static_cast<unsigned short>(nhidden[iHid]));
346 }
347 }
348 nNodes.push_back(static_cast<unsigned short>(pick(p.nOutput)));
349
350 GRLMLP grlmlp_temp(nNodes);
351
352 // bias
353 grlmlp_temp.set_total_bit_bias(pick(p.total_bit_bias));
354 grlmlp_temp.set_int_bit_bias(pick(p.int_bit_bias));
355 grlmlp_temp.set_is_signed_bias(pick(p.is_signed_bias));
356 grlmlp_temp.set_rounding_bias(pick(p.rounding_bias));
357 grlmlp_temp.set_saturation_bias(pick(p.saturation_bias));
358
359 // accumulator
360 grlmlp_temp.set_total_bit_accum(pick(p.total_bit_accum));
361 grlmlp_temp.set_int_bit_accum(pick(p.int_bit_accum));
362 grlmlp_temp.set_is_signed_accum(pick(p.is_signed_accum));
363 grlmlp_temp.set_rounding_accum(pick(p.rounding_accum));
364 grlmlp_temp.set_saturation_accum(pick(p.saturation_accum));
365
366 // weights
367 grlmlp_temp.set_total_bit_weight(pick(p.total_bit_weight));
368 grlmlp_temp.set_int_bit_weight(pick(p.int_bit_weight));
369 grlmlp_temp.set_is_signed_weight(pick(p.is_signed_weight));
370 grlmlp_temp.set_rounding_weight(pick(p.rounding_weight));
371 grlmlp_temp.set_saturation_weight(pick(p.saturation_weight));
372
373 // relu
374 grlmlp_temp.set_total_bit_relu(pick(p.total_bit_relu));
375 grlmlp_temp.set_int_bit_relu(pick(p.int_bit_relu));
376 grlmlp_temp.set_is_signed_relu(pick(p.is_signed_relu));
377 grlmlp_temp.set_rounding_relu(pick(p.rounding_relu));
378 grlmlp_temp.set_saturation_relu(pick(p.saturation_relu));
379
380 // generic
381 grlmlp_temp.set_total_bit(pick(p.total_bit));
382 grlmlp_temp.set_int_bit(pick(p.int_bit));
383 grlmlp_temp.set_is_signed(pick(p.is_signed));
384 grlmlp_temp.set_rounding(pick(p.rounding));
385 grlmlp_temp.set_saturation(pick(p.saturation));
386
387 // inputs
388 grlmlp_temp.set_W_input(pick(p.W_input));
389 grlmlp_temp.set_I_input(pick(p.I_input));
390
391 //threshold
392 grlmlp_temp.set_nn_thres(pick(p.nn_thres));
393
394 m_MLPs.push_back(std::move(grlmlp_temp));
395 }
396
397 B2DEBUG(10, "GRLNeuro::initialize finished. created "
398 << m_MLPs.size() << " MLP(s).");
399}
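
The check_size lambda enforces, and the pick lambda applies, a size-1 broadcast convention: each per-sector parameter list holds either one entry (shared by every MLP) or exactly nMLP entries (indexed per sector). A hedged illustration with hypothetical values:

// Size-1 broadcast convention used by initialize().
GRLNeuro::Parameters p;
p.nMLP = 3;
p.i_cdc_sector = {10};         // broadcast: every MLP gets 10 CDC inputs
p.i_ecl_sector = {6, 6, 8};    // per sector: the third MLP gets 8 ECL inputs
p.nHidden = {{64, 32}};        // broadcast: two hidden layers for all MLPs
// Any other list size (e.g. 2 entries with nMLP = 3) is fatal.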

◆ load() [1/2]

bool load ( unsigned isector,
const std::string & weightfilename,
const std::string & biasfilename )

Load MLPs from file.

Parameters
isector  index of the MLP
weightfilename  name of the text file containing the weights
biasfilename  name of the text file containing the biases
Returns
true if the MLPs were loaded correctly

Definition at line 545 of file GRLNeuro.cc.

546{
547 if (weightfilename.size() < 1) {
548 B2ERROR("Could not load Neurotrigger weights from database!");
549 return false;
550 } else if (biasfilename.size() < 1) {
551 B2ERROR("Could not load Neurotrigger bias from database!");
552 return false;
553 } else {
554 std::ifstream wfile(weightfilename);
555 if (!wfile.is_open()) {
556 B2WARNING("Could not open file " << weightfilename);
557 return false;
558 } else {
559 std::ifstream bfile(biasfilename);
560 if (!bfile.is_open()) {
561 B2WARNING("Could not open file " << biasfilename);
562 return false;
563 } else {
564 GRLMLP& expert = m_MLPs[isector];
565 std::vector<float> warray;
566 std::vector<float> barray;
567 warray.clear();
568 barray.clear();
569
570 float element;
571 while (wfile >> element) {
572 warray.push_back(element);
573 }
574 while (bfile >> element) {
575 barray.push_back(element);
576 }
577
578 if (warray.size() != expert.n_weights_cal()) {
579 B2ERROR("Number of weights is not equal to registered architecture!");
580 return false;
581 }
582 if (barray.size() != expert.n_bias_cal()) {
583 B2ERROR("Number of biases is not equal to registered architecture!");
584 return false;
585 }
586
587 expert.set_weights(warray);
588 expert.set_bias(barray);
589 return true;
590 }
591 }
592 }
593}
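
The loader reads plain whitespace-separated floating-point values (wfile >> element) and validates their count against n_weights_cal() and n_bias_cal(), so a weight or bias file is simply a flat list of numbers. A hedged sketch of producing such a file (the helper name is hypothetical):

#include <fstream>
#include <string>
#include <vector>

// Write values in the flat, whitespace-separated format that load() reads.
void writeFlatFloatFile(const std::string& filename,
                        const std::vector<float>& values)
{
  std::ofstream out(filename);
  for (float v : values) out << v << "\n";  // one value per line suffices
}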

◆ load() [2/2]

bool load ( unsigned isector,
std::vector< float > warray,
std::vector< float > barray )

Definition at line 595 of file GRLNeuro.cc.

596{
597 GRLMLP& expert = m_MLPs[isector];
598 expert.set_weights(warray);
599 expert.set_bias(barray);
600 return true;
601}
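
Note that, unlike the file-based overload, this variant performs no size validation against the registered architecture; the caller must supply correctly sized vectors. A hedged usage sketch with placeholder values:

// Load sector 0 directly from in-memory vectors, e.g. unpacked from a
// database payload. The sizes must already match the architecture.
std::vector<float> warray(neuro[0].get_weights().size(), 0.f);  // placeholder weights
std::vector<float> barray(neuro[0].get_bias().size(), 0.f);     // placeholder biases
neuro.load(0, warray, barray);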

◆ nSectors()

unsigned nSectors ( ) const
inline

return number of neural networks

Definition at line 113 of file GRLNeuro.h.

113{ return m_MLPs.size(); }

◆ operator[]() [1/2]

GRLMLP & operator[] ( unsigned index)
inline

return reference to a neural network

Definition at line 109 of file GRLNeuro.h.

109{ return m_MLPs[index]; }

◆ operator[]() [2/2]

const GRLMLP & operator[] ( unsigned index) const
inline

return const reference to a neural network

Definition at line 111 of file GRLNeuro.h.

111{ return m_MLPs[index]; }

◆ runMLP()

std::vector< float > runMLP ( unsigned isector,
const std::vector< float > & input )

Run an expert MLP.

Parameters
isector  index of the MLP
input  vector of input values
Returns
output values (classifier)

Definition at line 404 of file GRLNeuro.cc.

405{
406 const GRLMLP& expert = m_MLPs[isector];
407 vector<float> weights = expert.get_weights();
408 vector<float> bias = expert.get_bias();
409 vector<int> total_bit_bias = expert.get_total_bit_bias();
410 vector<int> int_bit_bias = expert.get_int_bit_bias();
411 vector<bool> is_signed_bias = expert.get_is_signed_bias();
412 vector<int> rounding_bias = expert.get_rounding_bias();
413 vector<int> saturation_bias = expert.get_saturation_bias();
414 vector<int> total_bit_accum = expert.get_total_bit_accum();
415 vector<int> int_bit_accum = expert.get_int_bit_accum();
416 vector<bool> is_signed_accum = expert.get_is_signed_accum();
417 vector<int> rounding_accum = expert.get_rounding_accum();
418 vector<int> saturation_accum = expert.get_saturation_accum();
419 vector<int> total_bit_weight = expert.get_total_bit_weight();
420 vector<int> int_bit_weight = expert.get_int_bit_weight();
421 vector<bool> is_signed_weight = expert.get_is_signed_weight();
422 vector<int> rounding_weight = expert.get_rounding_weight();
423 vector<int> saturation_weight = expert.get_saturation_weight();
424 vector<int> total_bit_relu = expert.get_total_bit_relu();
425 vector<int> int_bit_relu = expert.get_int_bit_relu();
426 vector<bool> is_signed_relu = expert.get_is_signed_relu();
427 vector<int> rounding_relu = expert.get_rounding_relu();
428 vector<int> saturation_relu = expert.get_saturation_relu();
429 vector<int> total_bit = expert.get_total_bit();
430 vector<int> int_bit = expert.get_int_bit();
431 vector<bool> is_signed = expert.get_is_signed();
432 vector<int> rounding = expert.get_rounding();
433 vector<int> saturation = expert.get_saturation();
434 vector<vector<int>> W_input = expert.get_W_input();
435 vector<vector<int>> I_input = expert.get_I_input();
436
437
438 //input layer
439 vector<float> layerinput = input;
440
441 // quantizer the inputs
442 for (size_t i = 0; i < layerinput.size(); ++i) {
443
444 int W_arr[24] = { 12, 12, 11, 11, 8, 8, 7, 7, 5, 6, 6, 6, 8, 8, 6, 5, 4, 5, 7, 7, 6, 4, 5, 5 };
445 int I_arr[24] = { 12, 12, 11, 11, 10, 9, 7, 7, 7, 7, 7, 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
446 if (i != 23) {
447 layerinput[i] = sim_input_layer_t(layerinput[i]);
448 layerinput[i] = sim_ap_dense_0_iq(layerinput[i], W_arr[i], I_arr[i]);
449
450 } else layerinput[i] = 0;
451 }
452
453 //hidden layer and output layer
454 vector<float> layeroutput = {};
455 unsigned num_layers = expert.get_number_of_layers();
456
457 //output layer
458 vector<float> final_fixed = {};
459
460 unsigned num_total_neurons = 0;
461 unsigned iw = 0;
462 for (unsigned i_layer = 0; i_layer < num_layers - 1; i_layer++) {
463 //read bias
464 unsigned num_neurons = expert.get_number_of_nodes_layer(i_layer + 1);
465 layeroutput.clear();
466 layeroutput.assign(num_neurons, 0.);
467 layeroutput.shrink_to_fit();
468
469 for (unsigned io = 0; io < num_neurons; ++io) {
470 float bias_raw = bias[io + num_total_neurons];
471 float bias_fixed = sim_fixed(bias_raw, total_bit_bias[i_layer], int_bit_bias[i_layer], is_signed_bias[i_layer],
472 rounding_bias[i_layer], saturation_bias[i_layer]);
473 float bias_contrib = sim_fixed(bias_fixed, total_bit_accum[i_layer], int_bit_accum[i_layer], is_signed_accum[i_layer],
474 rounding_accum[i_layer], saturation_accum[i_layer]);
475 layeroutput[io] = bias_contrib;
476
477 }
478 num_total_neurons += num_neurons;
479
480 //input*weight
481 unsigned num_inputs = layerinput.size();
482 for (unsigned ii = 0; ii < num_inputs; ++ii) {
483 float input_val = layerinput[ii];
484 for (unsigned io = 0; io < num_neurons; ++io) {
485 float weight_raw = weights[iw];
486 float weight_fixed = sim_fixed(weight_raw, total_bit_weight[i_layer], int_bit_weight[i_layer], is_signed_weight[i_layer],
487 rounding_weight[i_layer], saturation_weight[i_layer]);
488 float product = input_val * weight_fixed;
489 float contrib = sim_fixed(product, total_bit_accum[i_layer], int_bit_accum[i_layer], is_signed_accum[i_layer],
490 rounding_accum[i_layer], saturation_accum[i_layer]);
491 layeroutput[io] += contrib;
492 ++iw;
493 }
494 }
495 // input*weight done
496 if (i_layer < num_layers - 2) {
497 //relu
498 for (unsigned io = 0; io < num_neurons; ++io) {
499 float fixed_val = sim_fixed(layeroutput[io], total_bit[i_layer], int_bit[i_layer], is_signed[i_layer], rounding[i_layer],
500 saturation[i_layer]);
501 float relu_val = (fixed_val > 0) ? fixed_val : 0;
502 layeroutput[io] = sim_fixed(relu_val, total_bit_relu[i_layer], int_bit_relu[i_layer], is_signed_relu[i_layer],
503 rounding_relu[i_layer], saturation_relu[i_layer]);
504 }
505
506
507 //input to next layer
508 layerinput.clear();
509 layerinput.assign(num_neurons, 0);
510 layerinput.shrink_to_fit();
511
512 for (unsigned i = 0; i < num_neurons; ++i) {
513
514 layerinput[i] = sim_ap_fixed(layeroutput[i], W_input[i_layer][i], I_input[i_layer][i], false, 1, 4, 0);
515
516 }
517 if (i_layer == 0) {}
518 else if (i_layer == 1) {
519
520 layerinput[1] = 0;
521 layerinput[10] = 0;
522 layerinput[18] = 0;
523
524 } else if (i_layer == 2) {
525 layerinput[2] = 0;
526 layerinput[16] = 0;
527
528 }
529
530
531 } else {
532
533 // === HLS result_t: ap_fixed<19,5,AP_RND,AP_SAT_SYM,0> ===
534 unsigned num_final_fixed = layeroutput.size();
535 for (unsigned io = 0; io < num_final_fixed; ++io) {
536 final_fixed.push_back(sim_result_t(layeroutput[io]));
537 }
538
539 }
540 }
541 return final_fixed;
542}
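
runMLP emulates the FPGA fixed-point arithmetic in floating point: biases, weights, accumulator sums, and ReLU outputs are each passed through per-layer quantizers (sim_fixed and friends). A minimal sketch of such a quantizer, assuming ap_fixed<total_bit, int_bit>-style semantics; this illustrates the technique only and is not the Belle II implementation, which also takes explicit rounding and saturation mode flags:

#include <algorithm>
#include <cmath>

// Quantize a float to a fixed-point grid with total_bit bits, of which
// int_bit are integer bits (including the sign bit if is_signed): round to
// the nearest representable step and saturate at the representable range.
float quantizeFixed(float value, int total_bit, int int_bit, bool is_signed)
{
  const int frac_bit = total_bit - int_bit;         // fractional bits
  const float step = std::ldexp(1.0f, -frac_bit);   // 2^-frac_bit
  const float min_val = is_signed ? -std::ldexp(1.0f, int_bit - 1) : 0.0f;
  const float max_val = (is_signed ? std::ldexp(1.0f, int_bit - 1)
                                   : std::ldexp(1.0f, int_bit)) - step;
  const float rounded = std::round(value / step) * step;
  return std::clamp(rounded, min_val, max_val);
}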

◆ save()

void save ( const std::string & filename,
const std::string & arrayname = "MLPs" )

Save MLPs to file.

Parameters
filename  name of the TFile to write to
arrayname  name of the TObjArray holding the MLPs in the file
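
A hedged usage sketch (the file name is hypothetical):

GRLNeuro neuro;
// ... initialize and load or train the networks ...
neuro.save("GRLNeuroNets.root");                 // TObjArray named "MLPs"
neuro.save("GRLNeuroNets.root", "trainedMLPs");  // custom array name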

Member Data Documentation

◆ m_MLPs

std::vector<GRLMLP> m_MLPs = {}
private

List of networks.

Definition at line 137 of file GRLNeuro.h.

137{};

The documentation for this class was generated from the following files:
GRLNeuro.h
GRLNeuro.cc