Belle II Software development
GRLNeuro.cc
1/**************************************************************************
2 * basf2 (Belle II Analysis Software Framework) *
3 * Author: The Belle II Collaboration *
4 * *
5 * See git log for contributors and copyright holders. *
6 * This file is licensed under LGPL-3.0, see LICENSE.md. *
7 **************************************************************************/
8
9#include <framework/logging/Logger.h>
10#include <trg/grl/GRLNeuro.h>
11#include <trg/grl/dataobjects/GRLMLP.h>
12#include <cdc/geometry/CDCGeometryPar.h>
13#include <framework/gearbox/Const.h>
14#include <framework/gearbox/Unit.h>
15#include <framework/datastore/StoreObjPtr.h>
16#include <framework/datastore/StoreArray.h>
17#include <trg/cdc/dbobjects/CDCTriggerNeuroConfig.h>
18#include <trg/cdc/dataobjects/CDCTriggerTrack.h>
19#include <string>
20#include <cmath>
21#include <TFile.h>
22#include <algorithm>
23#include <iomanip>
24#include <iostream>
25#include <vector>
26#include <array>
27#include <cstdint>
28#include <utility>
29using namespace Belle2;
30using namespace CDC;
31using namespace std;
32
33
34
35// ========== ap_(u)fixed ========
36
/**
 * Software emulation of an HLS ap_fixed / ap_ufixed cast.
 *
 * The value is scaled onto the fixed-point grid defined by the bit widths,
 * quantized with the requested rounding mode, range-limited with the
 * requested overflow mode, and converted back to float.
 *
 * @param val             input value
 * @param total_bits      total bit width W
 * @param int_bits        integer bit width I
 * @param is_signed       true: ap_fixed, false: ap_ufixed
 * @param quant_mode      0 AP_TRN (truncate), 1 round-half-away-from-zero,
 *                        2 round-toward-zero, 3 floor, 4 ceil,
 *                        5 round-to-nearest-even (llrint)
 * @param overflow_mode   0 AP_SAT, 1 AP_SAT_ZERO, 2 AP_WRAP, 3 AP_WRAP_SM,
 *                        4 AP_SAT_SYM (anything else falls back to AP_WRAP)
 * @param saturation_bits optional saturation bit limit (0: unused)
 * @return the nearest value representable on the fixed-point grid, as float
 */
inline float sim_ap_fixed(float val, int total_bits, int int_bits,
                          bool is_signed = true,
                          int quant_mode = 0,
                          int overflow_mode = 0,
                          int saturation_bits = 0)
{
  // Normal ap_fixed types: W >= I.
  if (total_bits >= int_bits) {

    // --- 1. Effective bit widths (honour saturation_bits) ---
    int effective_total = (saturation_bits > 0) ?
                          std::min(saturation_bits, total_bits) : total_bits;
    int effective_I = std::min(int_bits, effective_total);
    int F = effective_total - effective_I; // fractional bits >= 0
    if (int_bits == 0 && is_signed) {
      // ap_fixed<W,0>: all W bits are fractional; widen the integer range by
      // one bit so the sign is handled by the range computation below.
      F = total_bits;
      effective_I = total_bits + 1;
    }

    // --- 2. Scale factor 2^-F ---
    const float scale = [F]() {
      if (F <= 0) return 1.0f;              // no fractional bits
      const uint32_t scale_int = 1U << F;   // bit shift instead of std::pow(2, F)
      return 1.0f / static_cast<float>(scale_int);
    }();

    // --- 3. Quantization (mode-specific rounding) ---
    const auto quantize = [](float scaled_val, int mode) -> int64_t {
      using QuantizerFuncPtr = int64_t(*)(float);
      static constexpr QuantizerFuncPtr quantizers[] = {
        [](float x) -> int64_t { return static_cast<int64_t>(std::trunc(x)); },
        [](float x) -> int64_t { return static_cast<int64_t>(std::round(x)); },
        [](float x) -> int64_t {
          return (x >= 0) ? static_cast<int64_t>(std::floor(x))
                 : static_cast<int64_t>(std::ceil(x));
        },
        [](float x) -> int64_t { return static_cast<int64_t>(std::floor(x)); },
        [](float x) -> int64_t { return static_cast<int64_t>(std::ceil(x)); },
        [](float x) -> int64_t { return std::llrint(x); }
      };
      constexpr int n_modes = static_cast<int>(sizeof(quantizers) / sizeof(quantizers[0]));
      const int clamped_mode = std::clamp(mode, 0, n_modes - 1);
      return quantizers[clamped_mode](scaled_val);
    };

    int64_t fixed_val = quantize(val / scale, quant_mode);

    // --- 4. Representable range on the grid (in grid units) ---
    const auto [min_val, max_val] = [=]() -> std::pair<int64_t, int64_t> {
      if (is_signed) {
        const int64_t int_max = (1LL << (effective_I - 1)) - 1;
        const int64_t int_min = -(1LL << (effective_I - 1));
        const int64_t frac_mask = (F > 0) ? ((1LL << F) - 1) : 0;
        return {int_min << F, (int_max << F) | frac_mask};
      } else {
        const int64_t int_max = (1LL << effective_I) - 1;
        const int64_t frac_mask = (F > 0) ? ((1LL << F) - 1) : 0;
        return {0, (int_max << F) | frac_mask};
      }
    }();

    // --- 5. Overflow handling ---
    const auto handle_overflow = [&]() {
      const int64_t bit_mask = (1LL << effective_total) - 1;
      const int64_t sign_bit = 1LL << (effective_total - 1);

      switch (overflow_mode) {
        case 0: // AP_SAT: clamp to [min_val, max_val]
          fixed_val = std::clamp(fixed_val, min_val, max_val);
          break;

        case 1: // AP_SAT_ZERO: set to 0 on overflow
          if (fixed_val > max_val || fixed_val < min_val) fixed_val = 0;
          break;

        case 2: // AP_WRAP: modular wrap-around
          if (is_signed) {
            fixed_val &= bit_mask;
            if (fixed_val & sign_bit) {
              fixed_val -= (1LL << effective_total);
            }
          } else {
            fixed_val &= bit_mask;
          }
          break;

        case 3: // AP_WRAP_SM: sign-magnitude wrap
          if (is_signed) {
            const int64_t magnitude_mask = (1LL << (effective_total - 1)) - 1;
            const bool is_negative = (fixed_val < 0);
            fixed_val = std::abs(fixed_val) & magnitude_mask;
            if (is_negative) fixed_val = -fixed_val;
          } else {
            fixed_val &= bit_mask;
          }
          break;

        case 4: // AP_SAT_SYM: symmetric saturation
          if (is_signed) {
            const int64_t sym_limit = std::min(max_val, -min_val);
            fixed_val = std::clamp(fixed_val, -sym_limit, sym_limit);
          } else {
            fixed_val = std::min(fixed_val, max_val);
          }
          break;

        default: // fall back to AP_WRAP
          fixed_val &= bit_mask;
          if (is_signed && (fixed_val & sign_bit)) {
            fixed_val -= (1LL << effective_total);
          }
      }
    };
    handle_overflow();

    // --- 6. Back to float ---
    return static_cast<float>(fixed_val) * scale;
  }
  // Special case total_bits < int_bits (only what this code needs;
  // would require modification for generic use).
  else {
    // 1. Bit bookkeeping: the lowest zero_bits bits are implicitly zero.
    const int zero_bits = std::max(int_bits - total_bits, 0); // so-called negative fraction bits
    const int useful_bits = total_bits - 1;

    // 2. Enumerate every representable value (plus symmetric negatives).
    std::vector<float> legal_values;
    for (int64_t i = 0; i < (1LL << useful_bits); ++i) {
      int64_t value = i << zero_bits;
      legal_values.push_back(static_cast<float>(value));
      legal_values.push_back(static_cast<float>(-value)); // NOTE(review): symmetric negatives -- confirm vs HLS
    }
    std::sort(legal_values.begin(), legal_values.end());

    // 3. Pick the representable value closest to val; ties go to the
    //    larger value (the comparator prefers a > b on equal distance).
    auto closest = std::min_element(legal_values.begin(), legal_values.end(),
    [val](float a, float b) {
      float dist_a = std::abs(a - val);
      float dist_b = std::abs(b - val);

      if (std::abs(dist_a - dist_b) < 1e-6f) {
        return a > b;
      }
      return dist_a < dist_b;
    });

    return *closest;
  }
}
213
214// --- Wrappers for this code---
215
216//=== for input layer ===//
217inline float sim_input_layer_t(float val)
218{
219 return sim_ap_fixed(val, 13, 12, false, 0, 4, 0); // AP_TRN,AP_SAT
220}
221
222inline float sim_dense_0_iq_t(float val)
223{
224 return sim_ap_fixed(val, 12, 12, false, 1, 2, 0); // AP_RND,AP_WAP
225}
226
227inline float sim_ap_dense_0_iq(float val, int w, int i)
228{
229 return sim_ap_fixed(val, w, i, false, 1, 4, 0); // AP_RND, AP_SAT_SYM
230}
231
// === for weights, bias and hidden layers ===
233inline float sim_fixed(float val, int total_bits, int int_bits, bool is_signed = true,
234 int rounding = 0, // 0: trunc, 1: round
235 int saturation = 2) // 2: wrap
236{
237 return sim_ap_fixed(val, total_bits, int_bits, is_signed, rounding, saturation, 0); // AP_TRN, AP_SAT_SYM
238}
239
240// === unsign for result ===
241inline float sim_result_t(float val)
242{
243 return sim_ap_fixed(val, 25, 9, true, 0, 2, 0); // AP_RND, AP_SAT_SYM
244}
245
246
247void
249{
250 using std::vector;
251
252 B2DEBUG(10, "GRLNeuro::initialize: nMLP=" << p.nMLP
253 << " nHidden.size=" << p.nHidden.size()
254 << " outputScale.size=" << p.outputScale.size());
255
256 // ------------------------------------------------------------------
257 // Basic parameter validation (fatal in initialize)
258 // ------------------------------------------------------------------
259
260 if (p.nHidden.size() != 1 && p.nHidden.size() != p.nMLP) {
261 B2FATAL("Number of nHidden lists should be 1 or " << p.nMLP);
262 }
263
264 if (p.outputScale.size() != 1 && p.outputScale.size() != p.nMLP) {
265 B2FATAL("Number of outputScale lists should be 1 or " << p.nMLP);
266 }
267
268 unsigned short nTarget = static_cast<unsigned short>(p.targetresult);
269 if (nTarget < 1) {
270 B2FATAL("No outputs! Turn on targetresult.");
271 }
272
273 for (unsigned iScale = 0; iScale < p.outputScale.size(); ++iScale) {
274 if (p.outputScale[iScale].size() != 2 * nTarget) {
275 B2FATAL("outputScale should be exactly " << 2 * nTarget << " values");
276 }
277 }
278
279 // ------------------------------------------------------------------
280 // Comprehensive parameter-size validation
281 // ------------------------------------------------------------------
282
283 auto check_size = [&](const auto & v, const char* name) {
284 const size_t s = v.size();
285 if (s != 1 && s != static_cast<size_t>(p.nMLP)) {
286 B2FATAL(std::string(name) + " size (" + std::to_string(s)
287 + ") != 1 and != nMLP (" + std::to_string(p.nMLP) + ")");
288 }
289 };
290
291 // sector / structure parameters
292 check_size(p.i_cdc_sector, "i_cdc_sector");
293 check_size(p.i_ecl_sector, "i_ecl_sector");
294 check_size(p.nHidden, "nHidden");
295 check_size(p.outputScale, "outputScale");
296
297 // bias
298 check_size(p.total_bit_bias, "total_bit_bias");
299 check_size(p.int_bit_bias, "int_bit_bias");
300 check_size(p.is_signed_bias, "is_signed_bias");
301 check_size(p.rounding_bias, "rounding_bias");
302 check_size(p.saturation_bias, "saturation_bias");
303
304 // accumulator
305 check_size(p.total_bit_accum, "total_bit_accum");
306 check_size(p.int_bit_accum, "int_bit_accum");
307 check_size(p.is_signed_accum, "is_signed_accum");
308 check_size(p.rounding_accum, "rounding_accum");
309 check_size(p.saturation_accum, "saturation_accum");
310
311 // weights
312 check_size(p.total_bit_weight, "total_bit_weight");
313 check_size(p.int_bit_weight, "int_bit_weight");
314 check_size(p.is_signed_weight, "is_signed_weight");
315 check_size(p.rounding_weight, "rounding_weight");
316 check_size(p.saturation_weight, "saturation_weight");
317
318 // relu
319 check_size(p.total_bit_relu, "total_bit_relu");
320 check_size(p.int_bit_relu, "int_bit_relu");
321 check_size(p.is_signed_relu, "is_signed_relu");
322 check_size(p.rounding_relu, "rounding_relu");
323 check_size(p.saturation_relu, "saturation_relu");
324
325 // generic
326 check_size(p.total_bit, "total_bit");
327 check_size(p.int_bit, "int_bit");
328 check_size(p.is_signed, "is_signed");
329 check_size(p.rounding, "rounding");
330 check_size(p.saturation, "saturation");
331
332 // inputs
333 check_size(p.W_input, "W_input");
334 check_size(p.I_input, "I_input");
335
336 // ------------------------------------------------------------------
337 // Initialize MLPs
338 // ------------------------------------------------------------------
339
340 m_MLPs.clear();
341 m_MLPs.reserve(p.nMLP);
342
343 for (unsigned iMLP = 0; iMLP < p.nMLP; ++iMLP) {
344
345 // helper: broadcast if size == 1
346 auto pick = [&](const auto & container) -> const auto& {
347 return (container.size() == 1) ? container[0] : container[iMLP];
348 };
349
350 unsigned short i_cdc = static_cast<unsigned short>(pick(p.i_cdc_sector));
351 unsigned short i_ecl = static_cast<unsigned short>(pick(p.i_ecl_sector));
352 unsigned short nInput = static_cast<unsigned short>(i_cdc + i_ecl);
353
354 // nHidden: vector<vector<float>>
355 const vector<float>& nhidden = pick(p.nHidden);
356 vector<unsigned short> nNodes = { nInput };
357
358 for (unsigned iHid = 0; iHid < nhidden.size(); ++iHid) {
359 if (p.multiplyHidden) {
360 nNodes.push_back(static_cast<unsigned short>(nhidden[iHid] * nNodes[0]));
361 } else {
362 nNodes.push_back(static_cast<unsigned short>(nhidden[iHid]));
363 }
364 }
365
366 nNodes.push_back(nTarget);
367 unsigned short targetVars = static_cast<unsigned short>(p.targetresult);
368
369 const vector<float>& outputScale = pick(p.outputScale);
370
371 GRLMLP grlmlp_temp(nNodes, targetVars, outputScale);
372
373 // bias
374 grlmlp_temp.set_total_bit_bias(pick(p.total_bit_bias));
375 grlmlp_temp.set_int_bit_bias(pick(p.int_bit_bias));
376 grlmlp_temp.set_is_signed_bias(pick(p.is_signed_bias));
377 grlmlp_temp.set_rounding_bias(pick(p.rounding_bias));
378 grlmlp_temp.set_saturation_bias(pick(p.saturation_bias));
379
380 // accumulator
381 grlmlp_temp.set_total_bit_accum(pick(p.total_bit_accum));
382 grlmlp_temp.set_int_bit_accum(pick(p.int_bit_accum));
383 grlmlp_temp.set_is_signed_accum(pick(p.is_signed_accum));
384 grlmlp_temp.set_rounding_accum(pick(p.rounding_accum));
385 grlmlp_temp.set_saturation_accum(pick(p.saturation_accum));
386
387 // weights
388 grlmlp_temp.set_total_bit_weight(pick(p.total_bit_weight));
389 grlmlp_temp.set_int_bit_weight(pick(p.int_bit_weight));
390 grlmlp_temp.set_is_signed_weight(pick(p.is_signed_weight));
391 grlmlp_temp.set_rounding_weight(pick(p.rounding_weight));
392 grlmlp_temp.set_saturation_weight(pick(p.saturation_weight));
393
394 // relu
395 grlmlp_temp.set_total_bit_relu(pick(p.total_bit_relu));
396 grlmlp_temp.set_int_bit_relu(pick(p.int_bit_relu));
397 grlmlp_temp.set_is_signed_relu(pick(p.is_signed_relu));
398 grlmlp_temp.set_rounding_relu(pick(p.rounding_relu));
399 grlmlp_temp.set_saturation_relu(pick(p.saturation_relu));
400
401 // generic
402 grlmlp_temp.set_total_bit(pick(p.total_bit));
403 grlmlp_temp.set_int_bit(pick(p.int_bit));
404 grlmlp_temp.set_is_signed(pick(p.is_signed));
405 grlmlp_temp.set_rounding(pick(p.rounding));
406 grlmlp_temp.set_saturation(pick(p.saturation));
407
408 // inputs
409 grlmlp_temp.set_W_input(pick(p.W_input));
410 grlmlp_temp.set_I_input(pick(p.I_input));
411
412 m_MLPs.push_back(std::move(grlmlp_temp));
413 }
414
415 B2DEBUG(10, "GRLNeuro::initialize finished. created "
416 << m_MLPs.size() << " MLP(s).");
417}
418
419
420
421float
422GRLNeuro::runMLP(unsigned isector, const std::vector<float>& input)
423{
424 const GRLMLP& expert = m_MLPs[isector];
425 vector<float> weights = expert.get_weights();
426 vector<float> bias = expert.get_bias();
427 vector<int> total_bit_bias = expert.get_total_bit_bias();
428 vector<int> int_bit_bias = expert.get_int_bit_bias();
429 vector<bool> is_signed_bias = expert.get_is_signed_bias();
430 vector<int> rounding_bias = expert.get_rounding_bias();
431 vector<int> saturation_bias = expert.get_saturation_bias();
432 vector<int> total_bit_accum = expert.get_total_bit_accum();
433 vector<int> int_bit_accum = expert.get_int_bit_accum();
434 vector<bool> is_signed_accum = expert.get_is_signed_accum();
435 vector<int> rounding_accum = expert.get_rounding_accum();
436 vector<int> saturation_accum = expert.get_saturation_accum();
437 vector<int> total_bit_weight = expert.get_total_bit_weight();
438 vector<int> int_bit_weight = expert.get_int_bit_weight();
439 vector<bool> is_signed_weight = expert.get_is_signed_weight();
440 vector<int> rounding_weight = expert.get_rounding_weight();
441 vector<int> saturation_weight = expert.get_saturation_weight();
442 vector<int> total_bit_relu = expert.get_total_bit_relu();
443 vector<int> int_bit_relu = expert.get_int_bit_relu();
444 vector<bool> is_signed_relu = expert.get_is_signed_relu();
445 vector<int> rounding_relu = expert.get_rounding_relu();
446 vector<int> saturation_relu = expert.get_saturation_relu();
447 vector<int> total_bit = expert.get_total_bit();
448 vector<int> int_bit = expert.get_int_bit();
449 vector<bool> is_signed = expert.get_is_signed();
450 vector<int> rounding = expert.get_rounding();
451 vector<int> saturation = expert.get_saturation();
452 vector<vector<int>> W_input = expert.get_W_input();
453 vector<vector<int>> I_input = expert.get_I_input();
454
455
456 //input layer
457 vector<float> layerinput = input;
458
459 // quantizer the inputs
460 for (size_t i = 0; i < layerinput.size(); ++i) {
461
462 int W_arr[24] = { 12, 12, 11, 11, 8, 8, 7, 7, 5, 6, 6, 6, 8, 8, 6, 5, 4, 5, 7, 7, 6, 4, 5, 5 };
463 int I_arr[24] = { 12, 12, 11, 11, 10, 9, 7, 7, 7, 7, 7, 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
464 if (i != 23) {
465 layerinput[i] = sim_input_layer_t(layerinput[i]);
466 layerinput[i] = sim_ap_dense_0_iq(layerinput[i], W_arr[i], I_arr[i]);
467
468 } else layerinput[i] = 0;
469 }
470
471 //hidden layer and output layer
472 vector<float> layeroutput = {};
473 unsigned num_layers = expert.get_number_of_layers();
474
475 unsigned num_total_neurons = 0;
476 unsigned iw = 0;
477 for (unsigned i_layer = 0; i_layer < num_layers - 1; i_layer++) {
478 //read bias
479 unsigned num_neurons = expert.get_number_of_nodes_layer(i_layer + 1);
480 layeroutput.clear();
481 layeroutput.assign(num_neurons, 0.);
482 layeroutput.shrink_to_fit();
483
484 for (unsigned io = 0; io < num_neurons; ++io) {
485 float bias_raw = bias[io + num_total_neurons];
486 float bias_fixed = sim_fixed(bias_raw, total_bit_bias[i_layer], int_bit_bias[i_layer], is_signed_bias[i_layer],
487 rounding_bias[i_layer], saturation_bias[i_layer]);
488 float bias_contrib = sim_fixed(bias_fixed, total_bit_accum[i_layer], int_bit_accum[i_layer], is_signed_accum[i_layer],
489 rounding_accum[i_layer], saturation_accum[i_layer]);
490 layeroutput[io] = bias_contrib;
491
492 }
493 num_total_neurons += num_neurons;
494
495 //input*weight
496 unsigned num_inputs = layerinput.size();
497 for (unsigned ii = 0; ii < num_inputs; ++ii) {
498 float input_val = layerinput[ii];
499 for (unsigned io = 0; io < num_neurons; ++io) {
500 float weight_raw = weights[iw];
501 float weight_fixed = sim_fixed(weight_raw, total_bit_weight[i_layer], int_bit_weight[i_layer], is_signed_weight[i_layer],
502 rounding_weight[i_layer], saturation_weight[i_layer]);
503 float product = input_val * weight_fixed;
504 float contrib = sim_fixed(product, total_bit_accum[i_layer], int_bit_accum[i_layer], is_signed_accum[i_layer],
505 rounding_accum[i_layer], saturation_accum[i_layer]);
506 layeroutput[io] += contrib;
507 ++iw;
508 }
509 }
510 // input*weight done
511 if (i_layer < num_layers - 2) {
512 //relu
513 for (unsigned io = 0; io < num_neurons; ++io) {
514 float fixed_val = sim_fixed(layeroutput[io], total_bit[i_layer], int_bit[i_layer], is_signed[i_layer], rounding[i_layer],
515 saturation[i_layer]);
516 float relu_val = (fixed_val > 0) ? fixed_val : 0;
517 layeroutput[io] = sim_fixed(relu_val, total_bit_relu[i_layer], int_bit_relu[i_layer], is_signed_relu[i_layer],
518 rounding_relu[i_layer], saturation_relu[i_layer]);
519 }
520
521
522 //input to next layer
523 layerinput.clear();
524 layerinput.assign(num_neurons, 0);
525 layerinput.shrink_to_fit();
526
527 for (unsigned i = 0; i < num_neurons; ++i) {
528
529 layerinput[i] = sim_ap_fixed(layeroutput[i], W_input[i_layer][i], I_input[i_layer][i], false, 1, 4, 0);
530
531 }
532 if (i_layer == 0) {}
533 else if (i_layer == 1) {
534
535 layerinput[1] = 0;
536 layerinput[10] = 0;
537 layerinput[18] = 0;
538
539 } else if (i_layer == 2) {
540 layerinput[2] = 0;
541 layerinput[16] = 0;
542
543 }
544
545
546 } else {
547
548
549 // === HLS result_t: ap_fixed<19,5,AP_RND,AP_SAT_SYM,0> ===
550 float final_fixed = sim_result_t(layeroutput[0]);
551
552 return final_fixed;
553 }
554 }
555 return 0;
556}
557
558
559bool GRLNeuro::load(unsigned isector, const string& weightfilename, const string& biasfilename)
560{
561 if (weightfilename.size() < 1) {
562 B2ERROR("Could not load Neurotrigger weights from database!");
563 return false;
564 } else if (biasfilename.size() < 1) {
565 B2ERROR("Could not load Neurotrigger bias from database!");
566 return false;
567 } else {
568 std::ifstream wfile(weightfilename);
569 if (!wfile.is_open()) {
570 B2WARNING("Could not open file " << weightfilename);
571 return false;
572 } else {
573 std::ifstream bfile(biasfilename);
574 if (!bfile.is_open()) {
575 B2WARNING("Could not open file " << biasfilename);
576 return false;
577 } else {
578 GRLMLP& expert = m_MLPs[isector];
579 std::vector<float> warray;
580 std::vector<float> barray;
581 warray.clear();
582 barray.clear();
583
584 float element;
585 while (wfile >> element) {
586 warray.push_back(element);
587 }
588 while (bfile >> element) {
589 barray.push_back(element);
590 }
591
592 if (warray.size() != expert.n_weights_cal()) {
593 B2ERROR("Number of weights is not equal to registered architecture!");
594 return false;
595 } else expert.set_weights(warray);
596 if (barray.size() != expert.n_bias_cal()) {
597 B2ERROR("Number of bias is not equal to registered architecture!");
598 return false;
599 }
600
601 expert.set_weights(warray);
602 expert.set_bias(barray);
603 return true;
604 }
605 }
606 }
607}
608
609bool GRLNeuro::load(unsigned isector, std::vector<float> warray, std::vector<float> barray)
610{
611 GRLMLP& expert = m_MLPs[isector];
612 expert.set_weights(warray);
613 expert.set_bias(barray);
614 return true;
615}
Class to keep all parameters of an expert MLP for the neuro trigger.
Definition GRLMLP.h:21
void set_total_bit_bias(const std::vector< int > &i)
set bit width etc.
Definition GRLMLP.h:87
void initialize(const Parameters &p)
Set parameters and get some network independent parameters.
Definition GRLNeuro.cc:248
bool load(unsigned isector, const std::string &wfilename, const std::string &bfilename)
Load MLPs from file.
Definition GRLNeuro.cc:559
float runMLP(unsigned isector, const std::vector< float > &input)
Run an expert MLP.
Definition GRLNeuro.cc:422
std::vector< GRLMLP > m_MLPs
List of networks.
Definition GRLNeuro.h:137
Abstract base class for different kinds of events.
STL namespace.
Struct to keep neurotrigger parameters.
Definition GRLNeuro.h:40