GRLNeuro.cc
/**************************************************************************
 * basf2 (Belle II Analysis Software Framework)                           *
 * Author: The Belle II Collaboration                                     *
 *                                                                        *
 * See git log for contributors and copyright holders.                    *
 * This file is licensed under LGPL-3.0, see LICENSE.md.                  *
 **************************************************************************/

#include <framework/logging/Logger.h>
#include <trg/grl/GRLNeuro.h>
#include <trg/grl/dataobjects/GRLMLP.h>
#include <cdc/geometry/CDCGeometryPar.h>
#include <framework/gearbox/Const.h>
#include <framework/gearbox/Unit.h>
#include <framework/datastore/StoreObjPtr.h>
#include <framework/datastore/StoreArray.h>
#include <trg/cdc/dbobjects/CDCTriggerNeuroConfig.h>
#include <trg/cdc/dataobjects/CDCTriggerTrack.h>
#include <string>
#include <fstream>
#include <cmath>
#include <TFile.h>
#include <algorithm>
#include <iomanip>
#include <iostream>
#include <vector>

using namespace Belle2;
using namespace CDC;
using namespace std;

// ========== Emulation of Vivado HLS ap_fixed<Total, Int, AP_TRN, AP_SAT> arithmetic ==========

float sim_fixed(float val, int total_bits, int int_bits,
                bool is_signed = true,
                int rounding = 0,   // 0: AP_TRN (truncate), 1: AP_RND (round)
                int saturation = 1) // 0: none, 1: AP_SAT, 2: AP_WRAP, 3: one-sided saturation
{
  int frac_bits = total_bits - int_bits;
  float scale = std::pow(2.0f, frac_bits);
  float scaled_val = val * scale;

  int64_t fixed_val;
  if (rounding == 1)
    fixed_val = static_cast<int64_t>(std::round(scaled_val));
  else
    fixed_val = static_cast<int64_t>(std::trunc(scaled_val));

  int64_t max_val, min_val;

  if (is_signed) {
    max_val = (1LL << (total_bits - 1)) - 1;
    min_val = -(1LL << (total_bits - 1));
  } else {
    max_val = (1LL << total_bits) - 1;
    min_val = 0;
  }

  // handle overflow according to the requested mode
  if (fixed_val > max_val || fixed_val < min_val) {
    switch (saturation) {
      case 1: // AP_SAT: clamp to the representable range
        fixed_val = std::min(std::max(fixed_val, min_val), max_val);
        break;
      case 2: // AP_WRAP: keep the lowest total_bits bits
        if (is_signed) {
          int64_t mod = 1LL << total_bits;
          fixed_val = (fixed_val + mod) % mod;
          if (fixed_val >= (1LL << (total_bits - 1)))
            fixed_val -= (1LL << total_bits);
        } else {
          fixed_val = fixed_val % (1LL << total_bits);
        }
        break;
      case 3: // saturate only towards the sign of the input value
        if (val >= 0)
          fixed_val = std::min(fixed_val, max_val);
        else
          fixed_val = std::max(fixed_val, min_val);
        break;
      case 0: // no overflow handling
      default:
        break;
    }
  }

  return static_cast<float>(fixed_val) / scale;
}
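/* Example (illustration only, not part of the trigger code path): how the
 * ap_fixed template arguments map onto sim_fixed. ap_fixed<24,16,AP_TRN,AP_SAT>
 * has 8 fractional bits, so values are truncated to a 2^-8 grid and saturated
 * at the 24-bit signed range:
 *
 *   sim_fixed(1.999f, 24, 16);           // -> 1.99609375 (AP_TRN truncates)
 *   sim_fixed(1.999f, 24, 16, true, 1);  // -> 2.0        (AP_RND rounds)
 *   sim_fixed(40000.f, 24, 16);          // -> 32767.99609375 (AP_SAT clamps)
 */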

// dense_0
inline float sim_fix_dense_0_accum_t(float x) { return sim_fixed(x, 24, 16); }
inline float sim_fix_dense_0_t(float x) { return sim_fixed(x, 20, 16); }
inline float sim_fix_dense_0_weight_t(float x) { return sim_fixed(x, 10, 2); }
inline float sim_fix_dense_0_bias_t(float x) { return sim_fixed(x, 5, 1); }

// dense_0_relu
inline float sim_fix_dense_0_relu_t(float x) { return sim_fixed(x, 15, 11, false); }
inline float sim_fix_dense_0_relu_table_t(float x) { return sim_fixed(x, 18, 8); }

// dense_1
inline float sim_fix_dense_1_iq_t(float x) { return sim_fixed(x, 14, 11, false); }
inline float sim_fix_dense_1_accum_t(float x) { return sim_fixed(x, 23, 14); }
inline float sim_fix_dense_1_t(float x) { return sim_fixed(x, 19, 14); }
inline float sim_fix_dense_1_weight_t(float x) { return sim_fixed(x, 8, 2); }
inline float sim_fix_dense_1_bias_t(float x) { return sim_fixed(x, 5, 1); }

// dense_1_relu
inline float sim_fix_dense_1_relu_t(float x) { return sim_fixed(x, 15, 10, false); }
inline float sim_fix_dense_1_relu_table_t(float x) { return sim_fixed(x, 18, 8); }

// dense_2
inline float sim_fix_dense_2_iq_t(float x) { return sim_fixed(x, 14, 10, false); }
inline float sim_fix_dense_2_accum_t(float x) { return sim_fixed(x, 19, 10); }
inline float sim_fix_dense_2_weight_t(float x) { return sim_fixed(x, 8, 1); }
inline float sim_fix_dense_2_bias_t(float x) { return sim_fixed(x, 1, 0, false); }

// dense_2 result_t: ap_fixed<15,8>, signed, AP_RND rounding, AP_WRAP overflow
inline float sim_fix_result_t(float x)
{
  return sim_fixed(x, 15, 8, true, 1, 2);
}


float sim_float_to_ufixed(float val, int W, int I)
{
  int F = W - I;
  float scale = std::pow(2.0f, F);
  float rounded = std::round(val * scale) / scale;

  // representable range: [0, 2^I - 2^(-F)]
  float max_val = std::pow(2.0f, I) - 1.0f / scale;
  float min_val = 0.0f;

  if (rounded > max_val) return max_val;
  if (rounded < min_val) return min_val;

  return rounded;
}
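/* Example (illustration only): several channels below use W < I, i.e. a
 * negative number of fractional bits. With W=7, I=8 the step size is 2 and
 * the largest representable value is 2^8 - 2 = 254:
 *
 *   sim_float_to_ufixed(3.7f, 7, 8);    // -> 4.0   (snapped to a multiple of 2)
 *   sim_float_to_ufixed(300.0f, 7, 8);  // -> 254.0 (clamped at the top)
 *   sim_float_to_ufixed(-1.0f, 7, 8);   // -> 0.0   (unsigned, clamped at 0)
 */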

// Per-node ufixed<W,I> quantization of the 64 dense_1 inputs
float sim_dense_1_input_quant(unsigned index, float val)
{
  switch (index) {
    case 0: return sim_float_to_ufixed(val, 7, 8);
    case 1: return sim_float_to_ufixed(val, 12, 9);
    case 2: return sim_float_to_ufixed(val, 14, 11);
    case 3: return sim_float_to_ufixed(val, 11, 9);
    case 4: return sim_float_to_ufixed(val, 7, 7);
    case 5: return sim_float_to_ufixed(val, 4, 9);
    case 6: return sim_float_to_ufixed(val, 13, 10);
    case 7: return sim_float_to_ufixed(val, 10, 8);
    case 8: return sim_float_to_ufixed(val, 9, 8);
    case 9: return sim_float_to_ufixed(val, 8, 8);
    case 10: return sim_float_to_ufixed(val, 8, 7);
    case 11: return sim_float_to_ufixed(val, 12, 10);
    case 12: return sim_float_to_ufixed(val, 7, 7);
    case 13: return sim_float_to_ufixed(val, 6, 9);
    case 14: return sim_float_to_ufixed(val, 11, 10);
    case 15: return sim_float_to_ufixed(val, 9, 7);
    case 16: return sim_float_to_ufixed(val, 14, 11);
    case 17: return sim_float_to_ufixed(val, 13, 10);
    case 18: return sim_float_to_ufixed(val, 8, 11);
    case 19: return sim_float_to_ufixed(val, 10, 8);
    case 20: return sim_float_to_ufixed(val, 7, 7);
    case 21: return sim_float_to_ufixed(val, 12, 10);
    case 22: return sim_float_to_ufixed(val, 7, 8);
    case 23: return sim_float_to_ufixed(val, 4, 7);
    case 24: return sim_float_to_ufixed(val, 13, 10);
    case 25: return sim_float_to_ufixed(val, 6, 5);
    case 26: return sim_float_to_ufixed(val, 12, 10);
    case 27: return sim_float_to_ufixed(val, 7, 9);
    case 28: return sim_float_to_ufixed(val, 11, 10);
    case 29: return sim_float_to_ufixed(val, 12, 10);
    case 30: return sim_float_to_ufixed(val, 13, 10);
    case 31: return sim_float_to_ufixed(val, 11, 10);
    case 32: return sim_float_to_ufixed(val, 12, 10);
    case 33: return sim_float_to_ufixed(val, 12, 9);
    case 34: return sim_float_to_ufixed(val, 14, 11);
    case 35: return sim_float_to_ufixed(val, 12, 10);
    case 36: return sim_float_to_ufixed(val, 12, 10);
    case 37: return sim_float_to_ufixed(val, 10, 10);
    case 38: return sim_float_to_ufixed(val, 11, 10);
    case 39: return sim_float_to_ufixed(val, 13, 10);
    case 40: return sim_float_to_ufixed(val, 10, 10);
    case 41: return sim_float_to_ufixed(val, 7, 9);
    case 42: return sim_float_to_ufixed(val, 11, 10);
    case 43: return sim_float_to_ufixed(val, 7, 8);
    case 44: return sim_float_to_ufixed(val, 10, 8);
    case 45: return 0.0f;
    case 46: return sim_float_to_ufixed(val, 7, 8);
    case 47: return sim_float_to_ufixed(val, 5, 7);
    case 48: return 0.0f;
    case 49: return sim_float_to_ufixed(val, 11, 11);
    case 50: return sim_float_to_ufixed(val, 13, 10);
    case 51: return sim_float_to_ufixed(val, 7, 8);
    case 52: return sim_float_to_ufixed(val, 11, 11);
    case 53: return sim_float_to_ufixed(val, 14, 11);
    case 54: return sim_float_to_ufixed(val, 7, 7);
    case 55: return sim_float_to_ufixed(val, 3, 7);
    case 56: return sim_float_to_ufixed(val, 8, 9);
    case 57: return sim_float_to_ufixed(val, 8, 8);
    case 58: return sim_float_to_ufixed(val, 13, 10);
    case 59: return sim_float_to_ufixed(val, 11, 9);
    case 60: return sim_float_to_ufixed(val, 11, 9);
    case 61: return sim_float_to_ufixed(val, 11, 8);
    case 62: return sim_float_to_ufixed(val, 10, 9);
    case 63: return sim_float_to_ufixed(val, 7, 9);
    default: return val;
  }
}

// Per-node ufixed<W,I> quantization of the 64 dense_2 inputs
// (nodes quantized to 0 bits always return 0)
float sim_dense_2_input_quant(unsigned index, float val)
{
  switch (index) {
    case 0: return sim_float_to_ufixed(val, 3, -1);
    case 1: return sim_float_to_ufixed(val, 13, 9);
    case 2: return 0.0f;
    case 3: return sim_float_to_ufixed(val, 10, 6);
    case 4: return sim_float_to_ufixed(val, 9, 5);
    case 5: return sim_float_to_ufixed(val, 10, 6);
    case 6: return sim_float_to_ufixed(val, 8, 6);
    case 7: return sim_float_to_ufixed(val, 14, 10);
    case 8: return 0.0f;
    case 9: return 0.0f;
    case 10: return 0.0f;
    case 11: return sim_float_to_ufixed(val, 10, 6);
    case 12: return sim_float_to_ufixed(val, 9, 5);
    case 13: return sim_float_to_ufixed(val, 1, 5);
    case 14: return sim_float_to_ufixed(val, 8, 5);
    case 15: return 0.0f;
    case 16: return sim_float_to_ufixed(val, 9, 5);
    case 17: return sim_float_to_ufixed(val, 13, 9);
    case 18: return sim_float_to_ufixed(val, 10, 6);
    case 19: return sim_float_to_ufixed(val, 10, 7);
    case 20: return sim_float_to_ufixed(val, 7, 3);
    case 21: return 0.0f;
    case 22: return sim_float_to_ufixed(val, 11, 7);
    case 23: return sim_float_to_ufixed(val, 10, 6);
    case 24: return sim_float_to_ufixed(val, 11, 7);
    case 25: return 0.0f;
    case 26: return sim_float_to_ufixed(val, 10, 6);
    case 27: return 0.0f;
    case 28: return 0.0f;
    case 29: return sim_float_to_ufixed(val, 9, 5);
    case 30: return sim_float_to_ufixed(val, 3, 4);
    case 31: return sim_float_to_ufixed(val, 10, 7);
    case 32: return 0.0f;
    case 33: return sim_float_to_ufixed(val, 10, 7);
    case 34: return 0.0f;
    case 35: return sim_float_to_ufixed(val, 8, 7);
    case 36: return sim_float_to_ufixed(val, 7, 3);
    case 37: return sim_float_to_ufixed(val, 9, 5);
    case 38: return sim_float_to_ufixed(val, 11, 7);
    case 39: return sim_float_to_ufixed(val, 4, 5);
    case 40: return sim_float_to_ufixed(val, 10, 6);
    case 41: return sim_float_to_ufixed(val, 10, 6);
    case 42: return sim_float_to_ufixed(val, 12, 9);
    case 43: return sim_float_to_ufixed(val, 10, 6);
    case 44: return sim_float_to_ufixed(val, 6, 6);
    case 45: return sim_float_to_ufixed(val, 7, 3);
    case 46: return sim_float_to_ufixed(val, 10, 6);
    case 47: return sim_float_to_ufixed(val, 9, 5);
    case 48: return sim_float_to_ufixed(val, 7, 4);
    case 49: return sim_float_to_ufixed(val, 10, 6);
    case 50: return sim_float_to_ufixed(val, 13, 9);
    case 51: return 0.0f;
    case 52: return sim_float_to_ufixed(val, 9, 5);
    case 53: return sim_float_to_ufixed(val, 10, 8);
    case 54: return 0.0f;
    case 55: return sim_float_to_ufixed(val, 9, 5);
    case 56: return sim_float_to_ufixed(val, 9, 5);
    case 57: return sim_float_to_ufixed(val, 13, 9);
    case 58: return 0.0f;
    case 59: return sim_float_to_ufixed(val, 10, 7);
    case 60: return sim_float_to_ufixed(val, 10, 6);
    case 61: return sim_float_to_ufixed(val, 8, 4);
    case 62: return sim_float_to_ufixed(val, 10, 6);
    case 63: return 0.0f;
    default: return val;
  }
}


void
GRLNeuro::initialize(const Parameters& p)
{
  // check parameters
  bool okay = true;
  // ensure that the length of each list matches the number of sectors
  if (p.nHidden.size() != 1 && p.nHidden.size() != p.nMLP) {
    B2ERROR("Number of nHidden lists should be 1 or " << p.nMLP);
    okay = false;
  }
  if (p.outputScale.size() != 1 && p.outputScale.size() != p.nMLP) {
    B2ERROR("Number of outputScale lists should be 1 or " << p.nMLP);
    okay = false;
  }
  // ensure that the number of target nodes is valid
  unsigned short nTarget = int(p.targetresult);
  if (nTarget < 1) {
    B2ERROR("No outputs! Turn on targetresult.");
    okay = false;
  }
  for (unsigned iScale = 0; iScale < p.outputScale.size(); ++iScale) {
    if (p.outputScale[iScale].size() != 2 * nTarget) {
      B2ERROR("outputScale should be exactly " << 2 * nTarget << " values");
      okay = false;
    }
  }

  if (!okay) return;

  // initialize MLPs
  m_MLPs.clear();
  for (unsigned iMLP = 0; iMLP < p.nMLP; ++iMLP) {
    // get the parameters of this sector
    unsigned short nInput = p.i_cdc_sector[iMLP] + p.i_ecl_sector[iMLP];
    vector<float> nhidden = (p.nHidden.size() == 1) ? p.nHidden[0] : p.nHidden[iMLP];
    vector<unsigned short> nNodes = {nInput};
    for (unsigned iHid = 0; iHid < nhidden.size(); ++iHid) {
      if (p.multiplyHidden) {
        nNodes.push_back(nhidden[iHid] * nNodes[0]);
      } else {
        nNodes.push_back(nhidden[iHid]);
      }
    }
    nNodes.push_back(nTarget);
    unsigned short targetVars = int(p.targetresult);
    vector<float> outputScale = (p.outputScale.size() == 1) ? p.outputScale[0] : p.outputScale[iMLP];
    m_MLPs.push_back(GRLMLP(nNodes, targetVars, outputScale));
  }
}
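/* Usage sketch for initialize() (illustration only; the values below are
 * hypothetical and the field types are assumptions, the field names are those
 * accessed above):
 *
 *   GRLNeuro::Parameters p;
 *   p.nMLP = 1;                    // one sector network
 *   p.i_cdc_sector = {32};         // CDC inputs of sector 0
 *   p.i_ecl_sector = {0};          // ECL inputs of sector 0 -> 32 input nodes
 *   p.nHidden = {{64, 64}};        // two hidden layers
 *   p.multiplyHidden = false;      // nHidden holds absolute node counts
 *   p.targetresult = true;         // one output node
 *   p.outputScale = {{-1., 1.}};   // 2 * nTarget scale values
 *
 *   GRLNeuro nn;
 *   nn.initialize(p);              // builds one GRLMLP with nNodes = {32, 64, 64, 1}
 */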


// Simplified ap_ufixed emulation with selectable rounding (AP_RND/AP_TRN)
// and overflow handling (AP_WRAP/AP_SAT); the value is treated as unsigned.
float sim_ap_fixed(float val, int total_bits = 12, int int_bits = 12,
                   bool round = true, bool wrap = true)
{
  int frac_bits = total_bits - int_bits;
  float scale = std::pow(2, frac_bits);

  // apply rounding if requested
  float scaled_val = val * scale;
  int fixed_val = static_cast<int>(round ? std::round(scaled_val) : std::floor(scaled_val));

  int max_int = std::pow(2, total_bits) - 1; // for 12-bit unsigned: 4095
  int raw_val;

  if (wrap) {
    // AP_WRAP: keep the lowest total_bits bits
    raw_val = fixed_val & max_int; // == fixed_val % (1 << total_bits)
  } else {
    // AP_SAT: clamp to [0, max_int]
    raw_val = std::min(std::max(fixed_val, 0), max_int);
  }

  return raw_val / scale;
}
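/* Example (illustration only): with total_bits = int_bits = 12 (the
 * input_layer_t defined below) the scale is 1, so values are rounded to
 * integers; AP_WRAP folds overflow back into the 12-bit range instead of
 * clamping:
 *
 *   sim_ap_fixed(7.3f, 12, 12, true, true);    // -> 7    (AP_RND)
 *   sim_ap_fixed(4096.f, 12, 12, true, true);  // -> 0    (4096 & 4095 wraps)
 *   sim_ap_fixed(-1.f, 12, 12, true, true);    // -> 4095 (two's-complement wrap)
 *   sim_ap_fixed(5000.f, 12, 12, true, false); // -> 4095 (saturation instead)
 */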

// input_layer_t: ap_ufixed<12,12> (integer-valued inputs), AP_RND + AP_WRAP
float sim_fix_input_layer_t(float val)
{
  return sim_ap_fixed(val, 12, 12, true, true);
}

// Per-node quantization of the (up to 18) network inputs (dense_0 input quantizer)
std::vector<float> sim_dense_0_iq(const std::vector<float>& input)
{
  const std::vector<std::pair<int, int>> dense_0_iq_config = {
    {12, 5}, {12, 4}, {10, 6}, {8, 4}, {8, 4}, {9, 5},
    {6, 2}, {8, 3}, {6, 3}, {5, 4}, {7, 4}, {9, 5},
    {8, 2}, {8, 2}, {6, 3}, {5, 3}, {8, 4}, {6, 2}
  };

  std::vector<float> output;
  output.reserve(input.size());
  for (size_t i = 0; i < input.size(); ++i) {
    int total_bits = dense_0_iq_config[i].first;
    int int_bits = dense_0_iq_config[i].second;
    output.push_back(sim_ap_fixed(input[i], total_bits, int_bits, true, true));
  }
  return output;
}


float
GRLNeuro::runMLP(unsigned isector, const std::vector<float>& input)
{
  const GRLMLP& expert = m_MLPs[isector];
  vector<float> weights = expert.getWeights();
  vector<float> bias = expert.getBias();
  vector<float> layerinput = input;

  // quantize the network inputs (input_layer_t)
  for (size_t i = 0; i < layerinput.size(); ++i) {
    layerinput[i] = sim_fix_input_layer_t(layerinput[i]);
  }

  // === dense_0: initialize the accumulators with the quantized biases ===
  vector<float> layeroutput2(expert.getNumberOfNodesLayer(1), 0.);
  unsigned num_inputs = layerinput.size();
  unsigned num_neurons = expert.getNumberOfNodesLayer(1); // 64
  for (unsigned io = 0; io < num_neurons; ++io) {
    float bias_fixed = sim_fix_dense_0_bias_t(bias[io]);
    layeroutput2[io] = sim_fix_dense_0_accum_t(bias_fixed);
  }

  // accumulate input * weight
  unsigned iw = 0;
  for (unsigned ii = 0; ii < num_inputs; ++ii) {
    float input_val = layerinput[ii];
    for (unsigned io = 0; io < num_neurons; ++io) {
      float weight_fixed = sim_fix_dense_0_weight_t(weights[iw]);
      layeroutput2[io] += sim_fix_dense_0_accum_t(input_val * weight_fixed);
      ++iw;
    }
  }

  // === dense_0_t quantization + ReLU activation ===
  std::vector<float> layeroutput2_fixed_relu(num_neurons);
  for (unsigned io = 0; io < num_neurons; ++io) {
    float fixed_val = sim_fix_dense_0_t(layeroutput2[io]);
    layeroutput2_fixed_relu[io] = (fixed_val > 0) ? fixed_val : 0;
  }

  // per-node input quantization of dense_1
  std::vector<float> dense1_input(num_neurons);
  for (unsigned i = 0; i < num_neurons; ++i) {
    dense1_input[i] = sim_dense_1_input_quant(i, layeroutput2_fixed_relu[i]);
  }

  // === dense_1: biases are stored after the 64 dense_0 biases ===
  vector<float> layeroutput3(expert.getNumberOfNodesLayer(2), 0.);
  unsigned num_inputs_1 = dense1_input.size();
  unsigned num_neurons_1 = expert.getNumberOfNodesLayer(2); // 64
  for (unsigned io = 0; io < num_neurons_1; ++io) {
    float bias_fixed = sim_fix_dense_1_bias_t(bias[io + 64]);
    layeroutput3[io] = sim_fix_dense_1_accum_t(bias_fixed);
  }

  for (unsigned ii = 0; ii < num_inputs_1; ++ii) {
    float input_val = dense1_input[ii];
    for (unsigned io = 0; io < num_neurons_1; ++io) {
      float weight_fixed = sim_fix_dense_1_weight_t(weights[iw]);
      layeroutput3[io] += sim_fix_dense_1_accum_t(input_val * weight_fixed);
      ++iw;
    }
  }

  // === dense_1_t quantization + ReLU activation ===
  std::vector<float> layeroutput3_fixed_relu(num_neurons_1);
  for (unsigned io = 0; io < num_neurons_1; ++io) {
    float fixed_val = sim_fix_dense_1_t(layeroutput3[io]);
    layeroutput3_fixed_relu[io] = (fixed_val > 0) ? fixed_val : 0;
  }

  // per-node input quantization of dense_2
  std::vector<float> dense2_input(num_neurons_1);
  for (unsigned i = 0; i < num_neurons_1; ++i) {
    dense2_input[i] = sim_dense_2_input_quant(i, layeroutput3_fixed_relu[i]);
  }

  // === dense_2 (output layer): biases are stored after the 128 hidden biases ===
  vector<float> layeroutput4(expert.getNumberOfNodesLayer(3), 0.);
  unsigned num_inputs_2 = dense2_input.size();
  unsigned num_neurons_2 = expert.getNumberOfNodesLayer(3);
  for (unsigned io = 0; io < num_neurons_2; ++io) {
    float bias_fixed = sim_fix_dense_2_bias_t(bias[io + 128]);
    layeroutput4[io] = sim_fix_dense_2_accum_t(bias_fixed);
  }

  for (unsigned ii = 0; ii < num_inputs_2; ++ii) {
    float input_val = dense2_input[ii];
    for (unsigned io = 0; io < num_neurons_2; ++io) {
      float weight_fixed = sim_fix_dense_2_weight_t(weights[iw]);
      layeroutput4[io] += sim_fix_dense_2_accum_t(input_val * weight_fixed);
      ++iw;
    }
  }

  return layeroutput4[0];
}
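/* Note (illustration only): runMLP returns the raw dense_2 accumulator of the
 * first output node. sim_fix_result_t above models the hardware result type
 * ap_fixed<15,8,AP_RND,AP_WRAP>; a bit-accurate caller could apply it to the
 * return value:
 *
 *   float raw = nn.runMLP(0, input);       // input: one float per input node
 *   float result = sim_fix_result_t(raw);  // quantized as in firmware
 */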


bool GRLNeuro::load(unsigned isector, const string& weightfilename, const string& biasfilename)
{
  if (weightfilename.size() < 1) {
    B2ERROR("Could not load Neurotrigger weights from database!");
    return false;
  }
  if (biasfilename.size() < 1) {
    B2ERROR("Could not load Neurotrigger bias from database!");
    return false;
  }

  std::ifstream wfile(weightfilename);
  if (!wfile.is_open()) {
    B2WARNING("Could not open file " << weightfilename);
    return false;
  }
  std::ifstream bfile(biasfilename);
  if (!bfile.is_open()) {
    B2WARNING("Could not open file " << biasfilename);
    return false;
  }

  GRLMLP& expert = m_MLPs[isector];
  std::vector<float> warray;
  std::vector<float> barray;

  float element;
  while (wfile >> element) {
    warray.push_back(element);
  }
  while (bfile >> element) {
    barray.push_back(element);
  }

  if (warray.size() != expert.nWeightsCal()) {
    B2ERROR("Number of weights does not match the registered architecture!");
    return false;
  }
  if (barray.size() != expert.nBiasCal()) {
    B2ERROR("Number of bias values does not match the registered architecture!");
    return false;
  }

  expert.setWeights(warray);
  expert.setBias(barray);
  return true;
}

bool GRLNeuro::load(unsigned isector, std::vector<float> warray, std::vector<float> barray)
{
  GRLMLP& expert = m_MLPs[isector];
  expert.setWeights(warray);
  expert.setBias(barray);
  return true;
}
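/* Usage sketch for the two load() overloads (file names hypothetical; the
 * files are whitespace-separated lists of float values):
 *
 *   GRLNeuro nn;
 *   // ... nn.initialize(p) as above ...
 *   nn.load(0, "sector0_weights.dat", "sector0_bias.dat");  // from text files
 *   nn.load(0, warray, barray);                             // from vectors in memory
 */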