Belle II Software prerelease-10-00-00a
GRLNeuro.cc
/**************************************************************************
 * basf2 (Belle II Analysis Software Framework)                           *
 * Author: The Belle II Collaboration                                     *
 *                                                                        *
 * See git log for contributors and copyright holders.                    *
 * This file is licensed under LGPL-3.0, see LICENSE.md.                  *
 **************************************************************************/

#include <framework/logging/Logger.h>
#include <trg/grl/GRLNeuro.h>
#include <trg/grl/dataobjects/GRLMLP.h>
#include <cdc/geometry/CDCGeometryPar.h>
#include <framework/gearbox/Const.h>
#include <framework/gearbox/Unit.h>
#include <framework/datastore/StoreObjPtr.h>
#include <framework/datastore/StoreArray.h>
#include <trg/cdc/dbobjects/CDCTriggerNeuroConfig.h>
#include <trg/cdc/dataobjects/CDCTriggerTrack.h>
#include <string>
#include <cmath>
#include <TFile.h>
#include <algorithm>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>

using namespace Belle2;
using namespace CDC;
using namespace std;

// ========== ap_fixed<Total, Int, AP_TRN, AP_SAT> ==========
enum RoundingMode { TRN, RND };
enum SaturationMode { NONE, SAT, WRAP, SAT_SYM };
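// These enums mirror the Vivado HLS ap_fixed template arguments:
// TRN ~ AP_TRN (truncate), RND ~ AP_RND (round to nearest),
// SAT ~ AP_SAT (saturate), WRAP ~ AP_WRAP (wrap around),
// SAT_SYM ~ AP_SAT_SYM (symmetric saturation).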

float sim_fixed(float val, int total_bits, int int_bits,
                bool is_signed = true,
                RoundingMode rounding = TRN,
                SaturationMode saturation = SAT)
{
  int frac_bits = total_bits - int_bits;
  float scale = std::pow(2.0f, frac_bits);
  float scaled_val = val * scale;

  int64_t fixed_val;
  if (rounding == RND)
    fixed_val = static_cast<int64_t>(std::round(scaled_val));
  else
    fixed_val = static_cast<int64_t>(std::trunc(scaled_val));

  int64_t max_val, min_val;

  if (is_signed) {
    max_val = (1LL << (total_bits - 1)) - 1;
    min_val = -(1LL << (total_bits - 1));
  } else {
    max_val = (1LL << total_bits) - 1;
    min_val = 0;
  }

  // handle out-of-range values according to the saturation mode
  if (fixed_val > max_val || fixed_val < min_val) {
    switch (saturation) {
      case SAT:
        fixed_val = std::min(std::max(fixed_val, min_val), max_val);
        break;
      case WRAP:
        if (is_signed) {
          int64_t mod = 1LL << total_bits;
          fixed_val = (fixed_val + mod) % mod;
          if (fixed_val >= (1LL << (total_bits - 1)))
            fixed_val -= (1LL << total_bits);
        } else {
          fixed_val = fixed_val % (1LL << total_bits);
        }
        break;
      case SAT_SYM:
        if (val >= 0)
          fixed_val = std::min(fixed_val, max_val);
        else
          fixed_val = std::max(fixed_val, min_val);
        break;
      case NONE:
      default:
        break;
    }
  }

  return static_cast<float>(fixed_val) / scale;
}
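// Worked example (a sketch of the emulation, not part of the trigger logic):
// sim_fixed(x, 24, 16) models ap_fixed<24,16,AP_TRN,AP_SAT>: 8 fractional bits,
// resolution 2^-8, representable range [-32768, 32767.99609375].
//   sim_fixed(1.2345f, 24, 16)  -> 1.234375        (316/256, truncated)
//   sim_fixed(40000.f, 24, 16)  -> 32767.99609375  (saturated)
// Caveat: std::trunc rounds toward zero, whereas HLS AP_TRN truncates toward
// negative infinity, so this emulation can differ by one LSB for negative values.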

// dense_0
inline float sim_fix_dense_0_accum_t(float x) { return sim_fixed(x, 24, 16); }
inline float sim_fix_dense_0_t(float x) { return sim_fixed(x, 20, 16); }
inline float sim_fix_dense_0_weight_t(float x) { return sim_fixed(x, 10, 2); }
inline float sim_fix_dense_0_bias_t(float x) { return sim_fixed(x, 5, 1); }

// dense_0_relu
inline float sim_fix_dense_0_relu_t(float x) { return sim_fixed(x, 15, 11, false); }
inline float sim_fix_dense_0_relu_table_t(float x) { return sim_fixed(x, 18, 8); }

// dense_1
inline float sim_fix_dense_1_iq_t(float x) { return sim_fixed(x, 14, 11, false); }
inline float sim_fix_dense_1_accum_t(float x) { return sim_fixed(x, 23, 14); }
inline float sim_fix_dense_1_t(float x) { return sim_fixed(x, 19, 14); }
inline float sim_fix_dense_1_weight_t(float x) { return sim_fixed(x, 8, 2); }
inline float sim_fix_dense_1_bias_t(float x) { return sim_fixed(x, 5, 1); }

// dense_1_relu
inline float sim_fix_dense_1_relu_t(float x) { return sim_fixed(x, 15, 10, false); }
inline float sim_fix_dense_1_relu_table_t(float x) { return sim_fixed(x, 18, 8); }

// dense_2
inline float sim_fix_dense_2_iq_t(float x) { return sim_fixed(x, 14, 10, false); }
inline float sim_fix_dense_2_accum_t(float x) { return sim_fixed(x, 19, 10); }
inline float sim_fix_dense_2_weight_t(float x) { return sim_fixed(x, 8, 1); }
inline float sim_fix_dense_2_bias_t(float x) { return sim_fixed(x, 1, 0, false); }

// dense_2 result_t: signed, RND rounding, WRAP overflow
inline float sim_fix_result_t(float x)
{
  return sim_fixed(x, 15, 8, true, RND, WRAP);
}


float sim_float_to_ufixed(float val, int W, int I)
{
  int F = W - I;
  float scale = std::pow(2.0f, F);
  float rounded = std::round(val * scale) / scale;

  // representable range: [0, 2^I - 1/scale]
  float max_val = std::pow(2.0f, I) - 1.0f / scale;
  float min_val = 0.0f;

  if (rounded > max_val) return max_val;
  if (rounded < min_val) return min_val;

  return rounded;
}
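// Note: W may be smaller than I (negative fraction width F), as in several of
// the per-channel configurations below; the value is then quantized to
// multiples of 2^-F > 1. A worked example, purely illustrative:
//   sim_float_to_ufixed(37.3f, 7, 8)  -> 38.0  (F = -1, multiples of 2)
//   sim_float_to_ufixed(300.f, 7, 8)  -> 254.0 (clamped to 2^8 - 2^1)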

float sim_dense_1_input_quant(unsigned index, float val)
{
  switch (index) {
    case 0: return sim_float_to_ufixed(val, 7, 8);
    case 1: return sim_float_to_ufixed(val, 12, 9);
    case 2: return sim_float_to_ufixed(val, 14, 11);
    case 3: return sim_float_to_ufixed(val, 11, 9);
    case 4: return sim_float_to_ufixed(val, 7, 7);
    case 5: return sim_float_to_ufixed(val, 4, 9);
    case 6: return sim_float_to_ufixed(val, 13, 10);
    case 7: return sim_float_to_ufixed(val, 10, 8);
    case 8: return sim_float_to_ufixed(val, 9, 8);
    case 9: return sim_float_to_ufixed(val, 8, 8);
    case 10: return sim_float_to_ufixed(val, 8, 7);
    case 11: return sim_float_to_ufixed(val, 12, 10);
    case 12: return sim_float_to_ufixed(val, 7, 7);
    case 13: return sim_float_to_ufixed(val, 6, 9);
    case 14: return sim_float_to_ufixed(val, 11, 10);
    case 15: return sim_float_to_ufixed(val, 9, 7);
    case 16: return sim_float_to_ufixed(val, 14, 11);
    case 17: return sim_float_to_ufixed(val, 13, 10);
    case 18: return sim_float_to_ufixed(val, 8, 11);
    case 19: return sim_float_to_ufixed(val, 10, 8);
    case 20: return sim_float_to_ufixed(val, 7, 7);
    case 21: return sim_float_to_ufixed(val, 12, 10);
    case 22: return sim_float_to_ufixed(val, 7, 8);
    case 23: return sim_float_to_ufixed(val, 4, 7);
    case 24: return sim_float_to_ufixed(val, 13, 10);
    case 25: return sim_float_to_ufixed(val, 6, 5);
    case 26: return sim_float_to_ufixed(val, 12, 10);
    case 27: return sim_float_to_ufixed(val, 7, 9);
    case 28: return sim_float_to_ufixed(val, 11, 10);
    case 29: return sim_float_to_ufixed(val, 12, 10);
    case 30: return sim_float_to_ufixed(val, 13, 10);
    case 31: return sim_float_to_ufixed(val, 11, 10);
    case 32: return sim_float_to_ufixed(val, 12, 10);
    case 33: return sim_float_to_ufixed(val, 12, 9);
    case 34: return sim_float_to_ufixed(val, 14, 11);
    case 35: return sim_float_to_ufixed(val, 12, 10);
    case 36: return sim_float_to_ufixed(val, 12, 10);
    case 37: return sim_float_to_ufixed(val, 10, 10);
    case 38: return sim_float_to_ufixed(val, 11, 10);
    case 39: return sim_float_to_ufixed(val, 13, 10);
    case 40: return sim_float_to_ufixed(val, 10, 10);
    case 41: return sim_float_to_ufixed(val, 7, 9);
    case 42: return sim_float_to_ufixed(val, 11, 10);
    case 43: return sim_float_to_ufixed(val, 7, 8);
    case 44: return sim_float_to_ufixed(val, 10, 8);
    case 45: return 0.0f;
    case 46: return sim_float_to_ufixed(val, 7, 8);
    case 47: return sim_float_to_ufixed(val, 5, 7);
    case 48: return 0.0f;
    case 49: return sim_float_to_ufixed(val, 11, 11);
    case 50: return sim_float_to_ufixed(val, 13, 10);
    case 51: return sim_float_to_ufixed(val, 7, 8);
    case 52: return sim_float_to_ufixed(val, 11, 11);
    case 53: return sim_float_to_ufixed(val, 14, 11);
    case 54: return sim_float_to_ufixed(val, 7, 7);
    case 55: return sim_float_to_ufixed(val, 3, 7);
    case 56: return sim_float_to_ufixed(val, 8, 9);
    case 57: return sim_float_to_ufixed(val, 8, 8);
    case 58: return sim_float_to_ufixed(val, 13, 10);
    case 59: return sim_float_to_ufixed(val, 11, 9);
    case 60: return sim_float_to_ufixed(val, 11, 9);
    case 61: return sim_float_to_ufixed(val, 11, 8);
    case 62: return sim_float_to_ufixed(val, 10, 9);
    case 63: return sim_float_to_ufixed(val, 7, 9);
    default: return val;
  }
}

float sim_dense_2_input_quant(unsigned index, float val)
{
  switch (index) {
    case 0: return sim_float_to_ufixed(val, 3, -1);
    case 1: return sim_float_to_ufixed(val, 13, 9);
    case 2: return 0.0f;
    case 3: return sim_float_to_ufixed(val, 10, 6);
    case 4: return sim_float_to_ufixed(val, 9, 5);
    case 5: return sim_float_to_ufixed(val, 10, 6);
    case 6: return sim_float_to_ufixed(val, 8, 6);
    case 7: return sim_float_to_ufixed(val, 14, 10);
    case 8: return 0.0f;
    case 9: return 0.0f;
    case 10: return 0.0f;
    case 11: return sim_float_to_ufixed(val, 10, 6);
    case 12: return sim_float_to_ufixed(val, 9, 5);
    case 13: return sim_float_to_ufixed(val, 1, 5);
    case 14: return sim_float_to_ufixed(val, 8, 5);
    case 15: return 0.0f;
    case 16: return sim_float_to_ufixed(val, 9, 5);
    case 17: return sim_float_to_ufixed(val, 13, 9);
    case 18: return sim_float_to_ufixed(val, 10, 6);
    case 19: return sim_float_to_ufixed(val, 10, 7);
    case 20: return sim_float_to_ufixed(val, 7, 3);
    case 21: return 0.0f;
    case 22: return sim_float_to_ufixed(val, 11, 7);
    case 23: return sim_float_to_ufixed(val, 10, 6);
    case 24: return sim_float_to_ufixed(val, 11, 7);
    case 25: return 0.0f;
    case 26: return sim_float_to_ufixed(val, 10, 6);
    case 27: return 0.0f;
    case 28: return 0.0f;
    case 29: return sim_float_to_ufixed(val, 9, 5);
    case 30: return sim_float_to_ufixed(val, 3, 4);
    case 31: return sim_float_to_ufixed(val, 10, 7);
    case 32: return 0.0f;
    case 33: return sim_float_to_ufixed(val, 10, 7);
    case 34: return 0.0f;
    case 35: return sim_float_to_ufixed(val, 8, 7);
    case 36: return sim_float_to_ufixed(val, 7, 3);
    case 37: return sim_float_to_ufixed(val, 9, 5);
    case 38: return sim_float_to_ufixed(val, 11, 7);
    case 39: return sim_float_to_ufixed(val, 4, 5);
    case 40: return sim_float_to_ufixed(val, 10, 6);
    case 41: return sim_float_to_ufixed(val, 10, 6);
    case 42: return sim_float_to_ufixed(val, 12, 9);
    case 43: return sim_float_to_ufixed(val, 10, 6);
    case 44: return sim_float_to_ufixed(val, 6, 6);
    case 45: return sim_float_to_ufixed(val, 7, 3);
    case 46: return sim_float_to_ufixed(val, 10, 6);
    case 47: return sim_float_to_ufixed(val, 9, 5);
    case 48: return sim_float_to_ufixed(val, 7, 4);
    case 49: return sim_float_to_ufixed(val, 10, 6);
    case 50: return sim_float_to_ufixed(val, 13, 9);
    case 51: return 0.0f;
    case 52: return sim_float_to_ufixed(val, 9, 5);
    case 53: return sim_float_to_ufixed(val, 10, 8);
    case 54: return 0.0f;
    case 55: return sim_float_to_ufixed(val, 9, 5);
    case 56: return sim_float_to_ufixed(val, 9, 5);
    case 57: return sim_float_to_ufixed(val, 13, 9);
    case 58: return 0.0f;
    case 59: return sim_float_to_ufixed(val, 10, 7);
    case 60: return sim_float_to_ufixed(val, 10, 6);
    case 61: return sim_float_to_ufixed(val, 8, 4);
    case 62: return sim_float_to_ufixed(val, 10, 6);
    case 63: return 0.0f;
    default: return val;
  }
}


void
GRLNeuro::initialize(const Parameters& p)
{
  // check parameters
  bool okay = true;
  // ensure that length of lists matches number of sectors
  if (p.nHidden.size() != 1 && p.nHidden.size() != p.nMLP) {
    B2ERROR("Number of nHidden lists should be 1 or " << p.nMLP);
    okay = false;
  }
  if (p.outputScale.size() != 1 && p.outputScale.size() != p.nMLP) {
    B2ERROR("Number of outputScale lists should be 1 or " << p.nMLP);
    okay = false;
  }
  // ensure that number of target nodes is valid
  unsigned short nTarget = int(p.targetresult);
  if (nTarget < 1) {
    B2ERROR("No outputs! Turn on targetresult.");
    okay = false;
  }
  for (unsigned iScale = 0; iScale < p.outputScale.size(); ++iScale) {
    if (p.outputScale[iScale].size() != 2 * nTarget) {
      B2ERROR("outputScale should be exactly " << 2 * nTarget << " values");
      okay = false;
    }
  }

  if (!okay) return;
  // initialize MLPs
  m_MLPs.clear();
  for (unsigned iMLP = 0; iMLP < p.nMLP; ++iMLP) {
    // get indices for sector parameters
    unsigned short nInput = p.i_cdc_sector[iMLP] + p.i_ecl_sector[iMLP];
    // use the single shared list if only one nHidden list is given
    vector<float> nhidden = (p.nHidden.size() == 1) ? p.nHidden[0] : p.nHidden[iMLP];
    vector<unsigned short> nNodes = {nInput};
    for (unsigned iHid = 0; iHid < nhidden.size(); ++iHid) {
      if (p.multiplyHidden) {
        nNodes.push_back(static_cast<unsigned short>(nhidden[iHid] * nNodes[0]));
      } else {
        nNodes.push_back(static_cast<unsigned short>(nhidden[iHid]));
      }
    }
    nNodes.push_back(nTarget);
    unsigned short targetVars = int(p.targetresult);
    vector<float> outputScale = (p.outputScale.size() == 1) ? p.outputScale[0] : p.outputScale[iMLP];
    m_MLPs.push_back(GRLMLP(nNodes, targetVars, outputScale));
  }
}
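// Layout sketch (values chosen for illustration only): with 18 inputs,
// p.nHidden = {{64, 64}} and multiplyHidden = false, the loop above builds
// nNodes = {18, 64, 64, nTarget}, i.e. the two 64-node hidden layers that
// runMLP() below addresses via getNumberOfNodesLayer().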


float sim_ap_fixed(float val, int total_bits = 12, int int_bits = 12,
                   bool round = true, bool wrap = true)
{
  int frac_bits = total_bits - int_bits;
  float scale = std::pow(2, frac_bits); // scale = 1 when total_bits == int_bits

  // apply rounding if requested
  float scaled_val = val * scale;
  int fixed_val = round ? std::round(scaled_val) : std::floor(scaled_val);

  int max_int = std::pow(2, total_bits) - 1; // for 12-bit unsigned, max = 4095
  int raw_val;

  if (wrap) {
    // wrap around: keep the low total_bits bits
    raw_val = fixed_val & max_int; // == fixed_val % (1 << total_bits)
  } else {
    // saturate to the unsigned range [0, max_int]
    raw_val = std::min(std::max(fixed_val, 0), max_int);
  }

  return raw_val / scale;
}
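// Worked example (illustration only): with total_bits = int_bits = 12 the
// scale is 1, so values behave like a 12-bit unsigned integer.
//   sim_ap_fixed(4096.4f, 12, 12, true, true)  -> 0.0    (4096 wraps to 0)
//   sim_ap_fixed(-1.0f,   12, 12, true, true)  -> 4095.0 (wraps around)
//   sim_ap_fixed(5000.f,  12, 12, true, false) -> 4095.0 (saturates instead)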

// input layer quantization: 12-bit unsigned, zero fractional bits
float sim_fix_input_layer_t(float val)
{
  return sim_ap_fixed(val, 12, 12, true, true); // AP_RND + AP_WRAP
}

std::vector<float> sim_dense_0_iq(const std::vector<float>& input)
{
  // per-channel (total_bits, int_bits) for the 18 dense_0 inputs
  const std::vector<std::pair<int, int>> dense_0_iq_config = {
    {12, 5}, {12, 4}, {10, 6}, {8, 4}, {8, 4}, {9, 5},
    {6, 2}, {8, 3}, {6, 3}, {5, 4}, {7, 4}, {9, 5},
    {8, 2}, {8, 2}, {6, 3}, {5, 3}, {8, 4}, {6, 2}
  };

  std::vector<float> output;
  output.reserve(input.size());
  for (size_t i = 0; i < input.size(); ++i) {
    int total_bits = dense_0_iq_config[i].first;
    int int_bits = dense_0_iq_config[i].second;
    // signed quantization with round-to-nearest and symmetric saturation
    // (RND and SAT_SYM are sim_fixed modes, not sim_ap_fixed bool flags)
    output.push_back(sim_fixed(input[i], total_bits, int_bits, true, RND, SAT_SYM));
  }
  return output;
}
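// Usage sketch (the 18-element raw_inputs vector is hypothetical):
//   std::vector<float> quantized = sim_dense_0_iq(raw_inputs);
// Note that runMLP() below quantizes its inputs with sim_fix_input_layer_t()
// and does not call this per-channel helper.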


float
GRLNeuro::runMLP(unsigned isector, const std::vector<float>& input)
{
  const GRLMLP& expert = m_MLPs[isector];
  vector<float> weights = expert.getWeights();
  vector<float> bias = expert.getBias();
  vector<float> layerinput = input;

  vector<float> layeroutput2 = {};
  vector<float> layeroutput3 = {};
  vector<float> layeroutput4 = {};

  // quantize the inputs to the input layer fixed-point format
  for (size_t i = 0; i < layerinput.size(); ++i) {
    layerinput[i] = sim_fix_input_layer_t(layerinput[i]);
  }
  layeroutput2.assign(expert.getNumberOfNodesLayer(1), 0.);

  // === dense_0: bias plus input*weight accumulation ===
  unsigned num_inputs = layerinput.size();
  unsigned num_neurons = expert.getNumberOfNodesLayer(1); // first hidden layer, 64
  for (unsigned io = 0; io < num_neurons; ++io) {
    float bias_raw = bias[io];
    float bias_fixed = sim_fix_dense_0_bias_t(bias_raw);
    float bias_contrib = sim_fix_dense_0_accum_t(bias_fixed);
    layeroutput2[io] = bias_contrib;
  }

  unsigned iw = 0;
  // input*weight
  for (unsigned ii = 0; ii < num_inputs; ++ii) {
    float input_val = layerinput[ii];
    for (unsigned io = 0; io < num_neurons; ++io) {
      float weight_raw = weights[iw];
      float weight_fixed = sim_fix_dense_0_weight_t(weight_raw);
      float product = input_val * weight_fixed;
      float contrib = sim_fix_dense_0_accum_t(product);

      layeroutput2[io] += contrib;

      ++iw;
    }
  }

  // apply activation function: quantize to dense_0_t, then ReLU
  std::vector<float> layeroutput2_fixed_relu(num_neurons);

  for (unsigned io = 0; io < num_neurons; ++io) {
    float fixed_val = sim_fix_dense_0_t(layeroutput2[io]);

    // ReLU
    float relu_val = (fixed_val > 0) ? fixed_val : 0;

    layeroutput2_fixed_relu[io] = relu_val;
  }

  // per-channel input quantization for dense_1
  std::vector<float> dense1_input(64);
  for (unsigned i = 0; i < 64; ++i) {
    dense1_input[i] = sim_dense_1_input_quant(i, layeroutput2_fixed_relu[i]);
  }

  // === dense_1 ===
  layeroutput3.assign(expert.getNumberOfNodesLayer(2), 0.);
  unsigned num_inputs_1 = dense1_input.size();
  unsigned num_neurons_1 = expert.getNumberOfNodesLayer(2); // second hidden layer, 64
  // the dense_1 biases follow the 64 dense_0 biases in the flat bias array
  for (unsigned io = 64; io < num_neurons_1 + 64; ++io) {
    float bias_raw = bias[io];
    float bias_fixed = sim_fix_dense_1_bias_t(bias_raw);
    float bias_contrib = sim_fix_dense_1_accum_t(bias_fixed);
    layeroutput3[io - 64] = bias_contrib;
  }

  for (unsigned ii = 0; ii < num_inputs_1; ++ii) {
    float input_val = dense1_input[ii];
    for (unsigned io = 0; io < num_neurons_1; ++io) {
      float weight_raw = weights[iw];
      float weight_fixed = sim_fix_dense_1_weight_t(weight_raw);
      float product = input_val * weight_fixed;
      float contrib = sim_fix_dense_1_accum_t(product);

      layeroutput3[io] += contrib;
      ++iw;
    }
  }

  // quantize to dense_1_t, then ReLU
  std::vector<float> layeroutput3_fixed_relu(num_neurons_1);

  for (unsigned io = 0; io < num_neurons_1; ++io) {
    float fixed_val = sim_fix_dense_1_t(layeroutput3[io]);
    // ReLU
    float relu_val = (fixed_val > 0) ? fixed_val : 0;

    layeroutput3_fixed_relu[io] = relu_val;
  }

  // per-channel input quantization for dense_2
  std::vector<float> dense2_input(64);
  for (unsigned i = 0; i < 64; ++i) {
    dense2_input[i] = sim_dense_2_input_quant(i, layeroutput3_fixed_relu[i]);
  }

  // === dense_2 (output layer, no activation) ===
  layeroutput4.assign(expert.getNumberOfNodesLayer(3), 0.);

  unsigned num_inputs_2 = dense2_input.size();
  unsigned num_neurons_2 = expert.getNumberOfNodesLayer(3);
  // the dense_2 biases follow the 2 * 64 hidden-layer biases
  for (unsigned io = 128; io < num_neurons_2 + 128; ++io) {
    float bias_raw = bias[io];
    float bias_fixed = sim_fix_dense_2_bias_t(bias_raw);
    float bias_contrib = sim_fix_dense_2_accum_t(bias_fixed);
    layeroutput4[io - 128] = bias_contrib;
  }

  for (unsigned ii = 0; ii < num_inputs_2; ++ii) {
    float input_val = dense2_input[ii];
    for (unsigned io = 0; io < num_neurons_2; ++io) {
      float weight_raw = weights[iw];
      float weight_fixed = sim_fix_dense_2_weight_t(weight_raw);
      float product = input_val * weight_fixed;
      float contrib = sim_fix_dense_2_accum_t(product);

      layeroutput4[io] += contrib;

      ++iw;
    }
  }
  return layeroutput4[0];
}
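// Usage sketch (sector index, file names and input size are illustrative only):
//   GRLNeuro neuro;
//   neuro.initialize(parameters);              // register the architecture
//   neuro.load(0, "weights.dat", "bias.dat");  // read weights and biases
//   std::vector<float> inputs(18, 0.f);        // one value per input channel
//   float response = neuro.runMLP(0, inputs);  // raw dense_2 output (no sigmoid)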


bool GRLNeuro::load(unsigned isector, const string& weightfilename, const string& biasfilename)
{
  if (weightfilename.empty()) {
    B2ERROR("Could not load Neurotrigger weights from database!");
    return false;
  } else if (biasfilename.empty()) {
    B2ERROR("Could not load Neurotrigger bias from database!");
    return false;
  } else {
    std::ifstream wfile(weightfilename);
    if (!wfile.is_open()) {
      B2WARNING("Could not open file " << weightfilename);
      return false;
    } else {
      std::ifstream bfile(biasfilename);
      if (!bfile.is_open()) {
        B2WARNING("Could not open file " << biasfilename);
        return false;
      } else {
        GRLMLP& expert = m_MLPs[isector];
        std::vector<float> warray;
        std::vector<float> barray;

        float element;
        while (wfile >> element) {
          warray.push_back(element);
        }
        while (bfile >> element) {
          barray.push_back(element);
        }

        if (warray.size() != expert.nWeightsCal()) {
          B2ERROR("Number of weights is not equal to registered architecture!");
          return false;
        }
        if (barray.size() != expert.nBiasCal()) {
          B2ERROR("Number of bias values is not equal to registered architecture!");
          return false;
        }

        expert.setWeights(warray);
        expert.setBias(barray);
        return true;
      }
    }
  }
}


bool GRLNeuro::load(unsigned isector, std::vector<float> warray, std::vector<float> barray)
{
  GRLMLP& expert = m_MLPs[isector];
  expert.setWeights(warray);
  expert.setBias(barray);
  return true;
}