Belle II Software development
GRLNeuro Class Reference

Class to represent the GRL Neuro. More...

#include <GRLNeuro.h>

Classes

struct  Parameters
 Struct to keep neurotrigger parameters. More...
 

Public Member Functions

 GRLNeuro ()
 Default constructor.
 
virtual ~GRLNeuro ()
 Default destructor.
 
void initialize (const Parameters &p)
 Set parameters and get some network independent parameters.
 
GRLMLP & operator[] (unsigned index)
 return reference to a neural network
 
const GRLMLP & operator[] (unsigned index) const
 return const reference to a neural network
 
unsigned nSectors () const
 return number of neural networks
 
void save (const std::string &filename, const std::string &arrayname="MLPs")
 Save MLPs to file.
 
bool load (unsigned isector, const std::string &wfilename, const std::string &bfilename)
 Load MLPs from file.
 
bool load (unsigned isector, std::vector< float > warray, std::vector< float > barray)
 Load MLP weights and bias directly from value arrays.
 
float runMLP (unsigned isector, const std::vector< float > &input)
 Run an expert MLP.
 

Private Attributes

std::vector< GRLMLP > m_MLPs = {}
 List of networks.
 

Detailed Description

Class to represent the GRL Neuro.

The Neurotrigger consists of one or several Multi Layer Perceptrons. The input values are calculated from ECLTRG cluster and a 2D track estimate. The output is a scaled estimate of the judgement.

See also
GRLNeuro Modules:
GRLTrainerModule for preparing training data and training,
GRLNeuro for loading trained networks and using them.

Definition at line 34 of file GRLNeuro.h.

Constructor & Destructor Documentation

◆ GRLNeuro()

GRLNeuro ( )
inline

Default constructor.

Definition at line 100 of file GRLNeuro.h.

100{}

◆ ~GRLNeuro()

virtual ~GRLNeuro ( )
inlinevirtual

Default destructor.

Definition at line 103 of file GRLNeuro.h.

103{}

Member Function Documentation

◆ initialize()

void initialize ( const Parameters & p)

Set parameters and get some network independent parameters.

Definition at line 248 of file GRLNeuro.cc.

249{
250 using std::vector;
251
252 B2DEBUG(10, "GRLNeuro::initialize: nMLP=" << p.nMLP
253 << " nHidden.size=" << p.nHidden.size()
254 << " outputScale.size=" << p.outputScale.size());
255
256 // ------------------------------------------------------------------
257 // Basic parameter validation (fatal in initialize)
258 // ------------------------------------------------------------------
259
260 if (p.nHidden.size() != 1 && p.nHidden.size() != p.nMLP) {
261 B2FATAL("Number of nHidden lists should be 1 or " << p.nMLP);
262 }
263
264 if (p.outputScale.size() != 1 && p.outputScale.size() != p.nMLP) {
265 B2FATAL("Number of outputScale lists should be 1 or " << p.nMLP);
266 }
267
268 unsigned short nTarget = static_cast<unsigned short>(p.targetresult);
269 if (nTarget < 1) {
270 B2FATAL("No outputs! Turn on targetresult.");
271 }
272
273 for (unsigned iScale = 0; iScale < p.outputScale.size(); ++iScale) {
274 if (p.outputScale[iScale].size() != 2 * nTarget) {
275 B2FATAL("outputScale should be exactly " << 2 * nTarget << " values");
276 }
277 }
278
279 // ------------------------------------------------------------------
280 // Comprehensive parameter-size validation
281 // ------------------------------------------------------------------
282
283 auto check_size = [&](const auto & v, const char* name) {
284 const size_t s = v.size();
285 if (s != 1 && s != static_cast<size_t>(p.nMLP)) {
286 B2FATAL(std::string(name) + " size (" + std::to_string(s)
287 + ") != 1 and != nMLP (" + std::to_string(p.nMLP) + ")");
288 }
289 };
290
291 // sector / structure parameters
292 check_size(p.i_cdc_sector, "i_cdc_sector");
293 check_size(p.i_ecl_sector, "i_ecl_sector");
294 check_size(p.nHidden, "nHidden");
295 check_size(p.outputScale, "outputScale");
296
297 // bias
298 check_size(p.total_bit_bias, "total_bit_bias");
299 check_size(p.int_bit_bias, "int_bit_bias");
300 check_size(p.is_signed_bias, "is_signed_bias");
301 check_size(p.rounding_bias, "rounding_bias");
302 check_size(p.saturation_bias, "saturation_bias");
303
304 // accumulator
305 check_size(p.total_bit_accum, "total_bit_accum");
306 check_size(p.int_bit_accum, "int_bit_accum");
307 check_size(p.is_signed_accum, "is_signed_accum");
308 check_size(p.rounding_accum, "rounding_accum");
309 check_size(p.saturation_accum, "saturation_accum");
310
311 // weights
312 check_size(p.total_bit_weight, "total_bit_weight");
313 check_size(p.int_bit_weight, "int_bit_weight");
314 check_size(p.is_signed_weight, "is_signed_weight");
315 check_size(p.rounding_weight, "rounding_weight");
316 check_size(p.saturation_weight, "saturation_weight");
317
318 // relu
319 check_size(p.total_bit_relu, "total_bit_relu");
320 check_size(p.int_bit_relu, "int_bit_relu");
321 check_size(p.is_signed_relu, "is_signed_relu");
322 check_size(p.rounding_relu, "rounding_relu");
323 check_size(p.saturation_relu, "saturation_relu");
324
325 // generic
326 check_size(p.total_bit, "total_bit");
327 check_size(p.int_bit, "int_bit");
328 check_size(p.is_signed, "is_signed");
329 check_size(p.rounding, "rounding");
330 check_size(p.saturation, "saturation");
331
332 // inputs
333 check_size(p.W_input, "W_input");
334 check_size(p.I_input, "I_input");
335
336 // ------------------------------------------------------------------
337 // Initialize MLPs
338 // ------------------------------------------------------------------
339
340 m_MLPs.clear();
341 m_MLPs.reserve(p.nMLP);
342
343 for (unsigned iMLP = 0; iMLP < p.nMLP; ++iMLP) {
344
345 // helper: broadcast if size == 1
346 auto pick = [&](const auto & container) -> const auto& {
347 return (container.size() == 1) ? container[0] : container[iMLP];
348 };
349
350 unsigned short i_cdc = static_cast<unsigned short>(pick(p.i_cdc_sector));
351 unsigned short i_ecl = static_cast<unsigned short>(pick(p.i_ecl_sector));
352 unsigned short nInput = static_cast<unsigned short>(i_cdc + i_ecl);
353
354 // nHidden: vector<vector<float>>
355 const vector<float>& nhidden = pick(p.nHidden);
356 vector<unsigned short> nNodes = { nInput };
357
358 for (unsigned iHid = 0; iHid < nhidden.size(); ++iHid) {
359 if (p.multiplyHidden) {
360 nNodes.push_back(static_cast<unsigned short>(nhidden[iHid] * nNodes[0]));
361 } else {
362 nNodes.push_back(static_cast<unsigned short>(nhidden[iHid]));
363 }
364 }
365
366 nNodes.push_back(nTarget);
367 unsigned short targetVars = static_cast<unsigned short>(p.targetresult);
368
369 const vector<float>& outputScale = pick(p.outputScale);
370
371 GRLMLP grlmlp_temp(nNodes, targetVars, outputScale);
372
373 // bias
374 grlmlp_temp.set_total_bit_bias(pick(p.total_bit_bias));
375 grlmlp_temp.set_int_bit_bias(pick(p.int_bit_bias));
376 grlmlp_temp.set_is_signed_bias(pick(p.is_signed_bias));
377 grlmlp_temp.set_rounding_bias(pick(p.rounding_bias));
378 grlmlp_temp.set_saturation_bias(pick(p.saturation_bias));
379
380 // accumulator
381 grlmlp_temp.set_total_bit_accum(pick(p.total_bit_accum));
382 grlmlp_temp.set_int_bit_accum(pick(p.int_bit_accum));
383 grlmlp_temp.set_is_signed_accum(pick(p.is_signed_accum));
384 grlmlp_temp.set_rounding_accum(pick(p.rounding_accum));
385 grlmlp_temp.set_saturation_accum(pick(p.saturation_accum));
386
387 // weights
388 grlmlp_temp.set_total_bit_weight(pick(p.total_bit_weight));
389 grlmlp_temp.set_int_bit_weight(pick(p.int_bit_weight));
390 grlmlp_temp.set_is_signed_weight(pick(p.is_signed_weight));
391 grlmlp_temp.set_rounding_weight(pick(p.rounding_weight));
392 grlmlp_temp.set_saturation_weight(pick(p.saturation_weight));
393
394 // relu
395 grlmlp_temp.set_total_bit_relu(pick(p.total_bit_relu));
396 grlmlp_temp.set_int_bit_relu(pick(p.int_bit_relu));
397 grlmlp_temp.set_is_signed_relu(pick(p.is_signed_relu));
398 grlmlp_temp.set_rounding_relu(pick(p.rounding_relu));
399 grlmlp_temp.set_saturation_relu(pick(p.saturation_relu));
400
401 // generic
402 grlmlp_temp.set_total_bit(pick(p.total_bit));
403 grlmlp_temp.set_int_bit(pick(p.int_bit));
404 grlmlp_temp.set_is_signed(pick(p.is_signed));
405 grlmlp_temp.set_rounding(pick(p.rounding));
406 grlmlp_temp.set_saturation(pick(p.saturation));
407
408 // inputs
409 grlmlp_temp.set_W_input(pick(p.W_input));
410 grlmlp_temp.set_I_input(pick(p.I_input));
411
412 m_MLPs.push_back(std::move(grlmlp_temp));
413 }
414
415 B2DEBUG(10, "GRLNeuro::initialize finished. created "
416 << m_MLPs.size() << " MLP(s).");
417}
std::vector< GRLMLP > m_MLPs
List of networks.
Definition GRLNeuro.h:137

◆ load() [1/2]

bool load ( unsigned isector,
const std::string & wfilename,
const std::string & bfilename )

Load MLPs from file.

Parameters
isector index of the MLP
wfilename name of the text file holding the weight values
bfilename name of the text file holding the bias values
Returns
true if the MLPs were loaded correctly

Definition at line 559 of file GRLNeuro.cc.

560{
561 if (weightfilename.size() < 1) {
562 B2ERROR("Could not load Neurotrigger weights from database!");
563 return false;
564 } else if (biasfilename.size() < 1) {
565 B2ERROR("Could not load Neurotrigger bias from database!");
566 return false;
567 } else {
568 std::ifstream wfile(weightfilename);
569 if (!wfile.is_open()) {
570 B2WARNING("Could not open file " << weightfilename);
571 return false;
572 } else {
573 std::ifstream bfile(biasfilename);
574 if (!bfile.is_open()) {
575 B2WARNING("Could not open file " << biasfilename);
576 return false;
577 } else {
578 GRLMLP& expert = m_MLPs[isector];
579 std::vector<float> warray;
580 std::vector<float> barray;
581 warray.clear();
582 barray.clear();
583
584 float element;
585 while (wfile >> element) {
586 warray.push_back(element);
587 }
588 while (bfile >> element) {
589 barray.push_back(element);
590 }
591
592 if (warray.size() != expert.n_weights_cal()) {
593 B2ERROR("Number of weights is not equal to registered architecture!");
594 return false;
595 } else expert.set_weights(warray);
596 if (barray.size() != expert.n_bias_cal()) {
597 B2ERROR("Number of bias is not equal to registered architecture!");
598 return false;
599 }
600
601 expert.set_weights(warray);
602 expert.set_bias(barray);
603 return true;
604 }
605 }
606 }
607}

◆ load() [2/2]

bool load ( unsigned isector,
std::vector< float > warray,
std::vector< float > barray )

Definition at line 609 of file GRLNeuro.cc.

610{
611 GRLMLP& expert = m_MLPs[isector];
612 expert.set_weights(warray);
613 expert.set_bias(barray);
614 return true;
615}

◆ nSectors()

unsigned nSectors ( ) const
inline

return number of neural networks

Definition at line 113 of file GRLNeuro.h.

113{ return m_MLPs.size(); }

◆ operator[]() [1/2]

GRLMLP & operator[] ( unsigned index)
inline

return reference to a neural network

Definition at line 109 of file GRLNeuro.h.

109{ return m_MLPs[index]; }

◆ operator[]() [2/2]

const GRLMLP & operator[] ( unsigned index) const
inline

return const reference to a neural network

Definition at line 111 of file GRLNeuro.h.

111{ return m_MLPs[index]; }

◆ runMLP()

float runMLP ( unsigned isector,
const std::vector< float > & input )

Run an expert MLP.

Parameters
isector index of the MLP
input vector of input values
Returns
output values (classifier)

Definition at line 422 of file GRLNeuro.cc.

423{
 // Bit-exact software simulation of the FPGA fixed-point MLP for one
 // sector: quantize the inputs, then per layer accumulate bias and
 // input*weight contributions in the accumulator precision, apply a
 // quantized ReLU on hidden layers, and return the fixed-point
 // converted single output of the last layer.
424 const GRLMLP& expert = m_MLPs[isector];
 // NOTE(review): every getter call below copies its vector; const
 // references would avoid ~30 allocations per invocation. Left as-is.
425 vector<float> weights = expert.get_weights();
426 vector<float> bias = expert.get_bias();
427 vector<int> total_bit_bias = expert.get_total_bit_bias();
428 vector<int> int_bit_bias = expert.get_int_bit_bias();
429 vector<bool> is_signed_bias = expert.get_is_signed_bias();
430 vector<int> rounding_bias = expert.get_rounding_bias();
431 vector<int> saturation_bias = expert.get_saturation_bias();
432 vector<int> total_bit_accum = expert.get_total_bit_accum();
433 vector<int> int_bit_accum = expert.get_int_bit_accum();
434 vector<bool> is_signed_accum = expert.get_is_signed_accum();
435 vector<int> rounding_accum = expert.get_rounding_accum();
436 vector<int> saturation_accum = expert.get_saturation_accum();
437 vector<int> total_bit_weight = expert.get_total_bit_weight();
438 vector<int> int_bit_weight = expert.get_int_bit_weight();
439 vector<bool> is_signed_weight = expert.get_is_signed_weight();
440 vector<int> rounding_weight = expert.get_rounding_weight();
441 vector<int> saturation_weight = expert.get_saturation_weight();
442 vector<int> total_bit_relu = expert.get_total_bit_relu();
443 vector<int> int_bit_relu = expert.get_int_bit_relu();
444 vector<bool> is_signed_relu = expert.get_is_signed_relu();
445 vector<int> rounding_relu = expert.get_rounding_relu();
446 vector<int> saturation_relu = expert.get_saturation_relu();
447 vector<int> total_bit = expert.get_total_bit();
448 vector<int> int_bit = expert.get_int_bit();
449 vector<bool> is_signed = expert.get_is_signed();
450 vector<int> rounding = expert.get_rounding();
451 vector<int> saturation = expert.get_saturation();
452 vector<vector<int>> W_input = expert.get_W_input();
453 vector<vector<int>> I_input = expert.get_I_input();
454
455
456 //input layer
457 vector<float> layerinput = input;
458
459 // quantizer the inputs
460 for (size_t i = 0; i < layerinput.size(); ++i) {
461
 // NOTE(review): per-input quantization widths are hard-coded for
 // exactly 24 inputs; assumes input.size() <= 24 — TODO confirm, and
 // consider moving these tables into Parameters / GRLMLP.
462 int W_arr[24] = { 12, 12, 11, 11, 8, 8, 7, 7, 5, 6, 6, 6, 8, 8, 6, 5, 4, 5, 7, 7, 6, 4, 5, 5 };
463 int I_arr[24] = { 12, 12, 11, 11, 10, 9, 7, 7, 7, 7, 7, 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
 // Input 23 is forced to zero — presumably unused in the firmware;
 // verify against the HLS model.
464 if (i != 23) {
465 layerinput[i] = sim_input_layer_t(layerinput[i]);
466 layerinput[i] = sim_ap_dense_0_iq(layerinput[i], W_arr[i], I_arr[i]);
467
468 } else layerinput[i] = 0;
469 }
470
471 //hidden layer and output layer
472 vector<float> layeroutput = {};
473 unsigned num_layers = expert.get_number_of_layers();
474
 // num_total_neurons: running offset into the flat bias array;
 // iw: running index into the flat weight array.
475 unsigned num_total_neurons = 0;
476 unsigned iw = 0;
477 for (unsigned i_layer = 0; i_layer < num_layers - 1; i_layer++) {
478 //read bias
479 unsigned num_neurons = expert.get_number_of_nodes_layer(i_layer + 1);
480 layeroutput.clear();
481 layeroutput.assign(num_neurons, 0.);
482 layeroutput.shrink_to_fit();
483
 // Seed each neuron's accumulator with its quantized bias, first in
 // bias precision, then re-quantized to accumulator precision.
484 for (unsigned io = 0; io < num_neurons; ++io) {
485 float bias_raw = bias[io + num_total_neurons];
486 float bias_fixed = sim_fixed(bias_raw, total_bit_bias[i_layer], int_bit_bias[i_layer], is_signed_bias[i_layer],
487 rounding_bias[i_layer], saturation_bias[i_layer]);
488 float bias_contrib = sim_fixed(bias_fixed, total_bit_accum[i_layer], int_bit_accum[i_layer], is_signed_accum[i_layer],
489 rounding_accum[i_layer], saturation_accum[i_layer]);
490 layeroutput[io] = bias_contrib;
491
492 }
493 num_total_neurons += num_neurons;
494
495 //input*weight
 // Weights are stored input-major: the inner loop over neurons must
 // advance `iw` in lock-step, so the statement order here is fixed.
496 unsigned num_inputs = layerinput.size();
497 for (unsigned ii = 0; ii < num_inputs; ++ii) {
498 float input_val = layerinput[ii];
499 for (unsigned io = 0; io < num_neurons; ++io) {
500 float weight_raw = weights[iw];
501 float weight_fixed = sim_fixed(weight_raw, total_bit_weight[i_layer], int_bit_weight[i_layer], is_signed_weight[i_layer],
502 rounding_weight[i_layer], saturation_weight[i_layer]);
503 float product = input_val * weight_fixed;
504 float contrib = sim_fixed(product, total_bit_accum[i_layer], int_bit_accum[i_layer], is_signed_accum[i_layer],
505 rounding_accum[i_layer], saturation_accum[i_layer]);
506 layeroutput[io] += contrib;
507 ++iw;
508 }
509 }
510 // input*weight done
 // Hidden layers get ReLU + re-quantization; the last layer returns.
511 if (i_layer < num_layers - 2) {
512 //relu
513 for (unsigned io = 0; io < num_neurons; ++io) {
514 float fixed_val = sim_fixed(layeroutput[io], total_bit[i_layer], int_bit[i_layer], is_signed[i_layer], rounding[i_layer],
515 saturation[i_layer]);
516 float relu_val = (fixed_val > 0) ? fixed_val : 0;
517 layeroutput[io] = sim_fixed(relu_val, total_bit_relu[i_layer], int_bit_relu[i_layer], is_signed_relu[i_layer],
518 rounding_relu[i_layer], saturation_relu[i_layer]);
519 }
520
521
522 //input to next layer
523 layerinput.clear();
524 layerinput.assign(num_neurons, 0);
525 layerinput.shrink_to_fit();
526
527 for (unsigned i = 0; i < num_neurons; ++i) {
528
529 layerinput[i] = sim_ap_fixed(layeroutput[i], W_input[i_layer][i], I_input[i_layer][i], false, 1, 4, 0);
530
531 }
 // NOTE(review): specific neurons are hard-zeroed after layers 1 and
 // 2 — presumably pruned/dead channels in the firmware netlist;
 // confirm against the HLS model before touching these indices.
532 if (i_layer == 0) {}
533 else if (i_layer == 1) {
534
535 layerinput[1] = 0;
536 layerinput[10] = 0;
537 layerinput[18] = 0;
538
539 } else if (i_layer == 2) {
540 layerinput[2] = 0;
541 layerinput[16] = 0;
542
543 }
544
545
546 } else {
547
548
549 // === HLS result_t: ap_fixed<19,5,AP_RND,AP_SAT_SYM,0> ===
550 float final_fixed = sim_result_t(layeroutput[0]);
551
 // Only the first output neuron is used (single classifier output).
552 return final_fixed;
553 }
554 }
 // Unreachable for well-formed networks (num_layers >= 2); defensive.
555 return 0;
556}

◆ save()

void save ( const std::string & filename,
const std::string & arrayname = "MLPs" )

Save MLPs to file.

Parameters
filename name of the TFile to write to
arrayname name of the TObjArray holding the MLPs in the file

Member Data Documentation

◆ m_MLPs

std::vector<GRLMLP> m_MLPs = {}
private

List of networks.

Definition at line 137 of file GRLNeuro.h.

137{};

The documentation for this class was generated from the following files: