#include <trg/grl/GRLNeuro.h>

#include <trg/grl/dataobjects/GRLMLP.h>
#include <trg/cdc/dataobjects/CDCTriggerTrack.h>
#include <trg/cdc/dbobjects/CDCTriggerNeuroConfig.h>
#include <cdc/geometry/CDCGeometryPar.h>
#include <framework/datastore/StoreArray.h>
#include <framework/datastore/StoreObjPtr.h>
#include <framework/gearbox/Const.h>
#include <framework/gearbox/Unit.h>
#include <framework/logging/Logger.h>

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <vector>
32enum RoundingMode { TRN, RND };
33enum SaturationMode { NONE, SAT, WRAP, SAT_SYM };
36float sim_fixed(
float val,
int total_bits,
int int_bits,
37 bool is_signed =
true,
38 RoundingMode rounding = TRN,
39 SaturationMode saturation = SAT)
41 int frac_bits = total_bits - int_bits;
42 float scale = std::pow(2.0f, frac_bits);
43 float scaled_val = val * scale;
47 fixed_val =
static_cast<int64_t
>(std::round(scaled_val));
49 fixed_val =
static_cast<int64_t
>(std::trunc(scaled_val));
51 int64_t max_val, min_val;
54 max_val = (1LL << (total_bits - 1)) - 1;
55 min_val = -(1LL << (total_bits - 1));
57 max_val = (1LL << total_bits) - 1;
62 if (fixed_val > max_val || fixed_val < min_val) {
65 fixed_val = std::min(std::max(fixed_val, min_val), max_val);
69 int64_t mod = 1LL << total_bits;
70 fixed_val = (fixed_val + mod) % mod;
71 if (fixed_val >= (1LL << (total_bits - 1)))
72 fixed_val -= (1LL << total_bits);
74 fixed_val = fixed_val % (1LL << total_bits);
79 fixed_val = std::min(fixed_val, max_val);
81 fixed_val = std::max(fixed_val, min_val);
89 return static_cast<float>(fixed_val) / scale;
93inline float sim_fix_dense_0_accum_t(
float x) {
return sim_fixed(x, 24, 16); }
94inline float sim_fix_dense_0_t(
float x) {
return sim_fixed(x, 20, 16); }
95inline float sim_fix_dense_0_weight_t(
float x) {
return sim_fixed(x, 10, 2); }
96inline float sim_fix_dense_0_bias_t(
float x) {
return sim_fixed(x, 5, 1); }
99inline float sim_fix_dense_0_relu_t(
float x) {
return sim_fixed(x, 15, 11,
false); }
100inline float sim_fix_dense_0_relu_table_t(
float x) {
return sim_fixed(x, 18, 8); }
103inline float sim_fix_dense_1_iq_t(
float x) {
return sim_fixed(x, 14, 11,
false); }
104inline float sim_fix_dense_1_accum_t(
float x) {
return sim_fixed(x, 23, 14); }
105inline float sim_fix_dense_1_t(
float x) {
return sim_fixed(x, 19, 14); }
106inline float sim_fix_dense_1_weight_t(
float x) {
return sim_fixed(x, 8, 2); }
107inline float sim_fix_dense_1_bias_t(
float x) {
return sim_fixed(x, 5, 1); }
110inline float sim_fix_dense_1_relu_t(
float x) {
return sim_fixed(x, 15, 10,
false); }
111inline float sim_fix_dense_1_relu_table_t(
float x) {
return sim_fixed(x, 18, 8); }
114inline float sim_fix_dense_2_iq_t(
float x) {
return sim_fixed(x, 14, 10,
false); }
115inline float sim_fix_dense_2_accum_t(
float x) {
return sim_fixed(x, 19, 10); }
116inline float sim_fix_dense_2_weight_t(
float x) {
return sim_fixed(x, 8, 1); }
117inline float sim_fix_dense_2_bias_t(
float x) {
return sim_fixed(x, 1, 0,
false); }
120inline float sim_fix_result_t(
float x)
122 return sim_fixed(x, 15, 8,
true, RND, WRAP);
126float sim_float_to_ufixed(
float val,
int W,
int I)
129 float scale = std::pow(2.0f, F);
130 float rounded = std::round(val * scale) / scale;
133 float max_val = std::pow(2.0f, I) - 1.0f / scale;
134 float min_val = 0.0f;
136 if (rounded > max_val)
return max_val;
137 if (rounded < min_val)
return min_val;
142float sim_dense_1_input_quant(
unsigned index,
float val)
145 case 0:
return sim_float_to_ufixed(val, 7, 8);
146 case 1:
return sim_float_to_ufixed(val, 12, 9);
147 case 2:
return sim_float_to_ufixed(val, 14, 11);
148 case 3:
return sim_float_to_ufixed(val, 11, 9);
149 case 4:
return sim_float_to_ufixed(val, 7, 7);
150 case 5:
return sim_float_to_ufixed(val, 4, 9);
151 case 6:
return sim_float_to_ufixed(val, 13, 10);
152 case 7:
return sim_float_to_ufixed(val, 10, 8);
153 case 8:
return sim_float_to_ufixed(val, 9, 8);
154 case 9:
return sim_float_to_ufixed(val, 8, 8);
155 case 10:
return sim_float_to_ufixed(val, 8, 7);
156 case 11:
return sim_float_to_ufixed(val, 12, 10);
157 case 12:
return sim_float_to_ufixed(val, 7, 7);
158 case 13:
return sim_float_to_ufixed(val, 6, 9);
159 case 14:
return sim_float_to_ufixed(val, 11, 10);
160 case 15:
return sim_float_to_ufixed(val, 9, 7);
161 case 16:
return sim_float_to_ufixed(val, 14, 11);
162 case 17:
return sim_float_to_ufixed(val, 13, 10);
163 case 18:
return sim_float_to_ufixed(val, 8, 11);
164 case 19:
return sim_float_to_ufixed(val, 10, 8);
165 case 20:
return sim_float_to_ufixed(val, 7, 7);
166 case 21:
return sim_float_to_ufixed(val, 12, 10);
167 case 22:
return sim_float_to_ufixed(val, 7, 8);
168 case 23:
return sim_float_to_ufixed(val, 4, 7);
169 case 24:
return sim_float_to_ufixed(val, 13, 10);
170 case 25:
return sim_float_to_ufixed(val, 6, 5);
171 case 26:
return sim_float_to_ufixed(val, 12, 10);
172 case 27:
return sim_float_to_ufixed(val, 7, 9);
173 case 28:
return sim_float_to_ufixed(val, 11, 10);
174 case 29:
return sim_float_to_ufixed(val, 12, 10);
175 case 30:
return sim_float_to_ufixed(val, 13, 10);
176 case 31:
return sim_float_to_ufixed(val, 11, 10);
177 case 32:
return sim_float_to_ufixed(val, 12, 10);
178 case 33:
return sim_float_to_ufixed(val, 12, 9);
179 case 34:
return sim_float_to_ufixed(val, 14, 11);
180 case 35:
return sim_float_to_ufixed(val, 12, 10);
181 case 36:
return sim_float_to_ufixed(val, 12, 10);
182 case 37:
return sim_float_to_ufixed(val, 10, 10);
183 case 38:
return sim_float_to_ufixed(val, 11, 10);
184 case 39:
return sim_float_to_ufixed(val, 13, 10);
185 case 40:
return sim_float_to_ufixed(val, 10, 10);
186 case 41:
return sim_float_to_ufixed(val, 7, 9);
187 case 42:
return sim_float_to_ufixed(val, 11, 10);
188 case 43:
return sim_float_to_ufixed(val, 7, 8);
189 case 44:
return sim_float_to_ufixed(val, 10, 8);
190 case 45:
return 0.0f;
191 case 46:
return sim_float_to_ufixed(val, 7, 8);
192 case 47:
return sim_float_to_ufixed(val, 5, 7);
193 case 48:
return 0.0f;
194 case 49:
return sim_float_to_ufixed(val, 11, 11);
195 case 50:
return sim_float_to_ufixed(val, 13, 10);
196 case 51:
return sim_float_to_ufixed(val, 7, 8);
197 case 52:
return sim_float_to_ufixed(val, 11, 11);
198 case 53:
return sim_float_to_ufixed(val, 14, 11);
199 case 54:
return sim_float_to_ufixed(val, 7, 7);
200 case 55:
return sim_float_to_ufixed(val, 3, 7);
201 case 56:
return sim_float_to_ufixed(val, 8, 9);
202 case 57:
return sim_float_to_ufixed(val, 8, 8);
203 case 58:
return sim_float_to_ufixed(val, 13, 10);
204 case 59:
return sim_float_to_ufixed(val, 11, 9);
205 case 60:
return sim_float_to_ufixed(val, 11, 9);
206 case 61:
return sim_float_to_ufixed(val, 11, 8);
207 case 62:
return sim_float_to_ufixed(val, 10, 9);
208 case 63:
return sim_float_to_ufixed(val, 7, 9);
213float sim_dense_2_input_quant(
unsigned index,
float val)
216 case 0:
return sim_float_to_ufixed(val, 3, -1);
217 case 1:
return sim_float_to_ufixed(val, 13, 9);
219 case 3:
return sim_float_to_ufixed(val, 10, 6);
220 case 4:
return sim_float_to_ufixed(val, 9, 5);
221 case 5:
return sim_float_to_ufixed(val, 10, 6);
222 case 6:
return sim_float_to_ufixed(val, 8, 6);
223 case 7:
return sim_float_to_ufixed(val, 14, 10);
226 case 10:
return 0.0f;
227 case 11:
return sim_float_to_ufixed(val, 10, 6);
228 case 12:
return sim_float_to_ufixed(val, 9, 5);
229 case 13:
return sim_float_to_ufixed(val, 1, 5);
230 case 14:
return sim_float_to_ufixed(val, 8, 5);
231 case 15:
return 0.0f;
232 case 16:
return sim_float_to_ufixed(val, 9, 5);
233 case 17:
return sim_float_to_ufixed(val, 13, 9);
234 case 18:
return sim_float_to_ufixed(val, 10, 6);
235 case 19:
return sim_float_to_ufixed(val, 10, 7);
236 case 20:
return sim_float_to_ufixed(val, 7, 3);
237 case 21:
return 0.0f;
238 case 22:
return sim_float_to_ufixed(val, 11, 7);
239 case 23:
return sim_float_to_ufixed(val, 10, 6);
240 case 24:
return sim_float_to_ufixed(val, 11, 7);
241 case 25:
return 0.0f;
242 case 26:
return sim_float_to_ufixed(val, 10, 6);
243 case 27:
return 0.0f;
244 case 28:
return 0.0f;
245 case 29:
return sim_float_to_ufixed(val, 9, 5);
246 case 30:
return sim_float_to_ufixed(val, 3, 4);
247 case 31:
return sim_float_to_ufixed(val, 10, 7);
248 case 32:
return 0.0f;
249 case 33:
return sim_float_to_ufixed(val, 10, 7);
250 case 34:
return 0.0f;
251 case 35:
return sim_float_to_ufixed(val, 8, 7);
252 case 36:
return sim_float_to_ufixed(val, 7, 3);
253 case 37:
return sim_float_to_ufixed(val, 9, 5);
254 case 38:
return sim_float_to_ufixed(val, 11, 7);
255 case 39:
return sim_float_to_ufixed(val, 4, 5);
256 case 40:
return sim_float_to_ufixed(val, 10, 6);
257 case 41:
return sim_float_to_ufixed(val, 10, 6);
258 case 42:
return sim_float_to_ufixed(val, 12, 9);
259 case 43:
return sim_float_to_ufixed(val, 10, 6);
260 case 44:
return sim_float_to_ufixed(val, 6, 6);
261 case 45:
return sim_float_to_ufixed(val, 7, 3);
262 case 46:
return sim_float_to_ufixed(val, 10, 6);
263 case 47:
return sim_float_to_ufixed(val, 9, 5);
264 case 48:
return sim_float_to_ufixed(val, 7, 4);
265 case 49:
return sim_float_to_ufixed(val, 10, 6);
266 case 50:
return sim_float_to_ufixed(val, 13, 9);
267 case 51:
return 0.0f;
268 case 52:
return sim_float_to_ufixed(val, 9, 5);
269 case 53:
return sim_float_to_ufixed(val, 10, 8);
270 case 54:
return 0.0f;
271 case 55:
return sim_float_to_ufixed(val, 9, 5);
272 case 56:
return sim_float_to_ufixed(val, 9, 5);
273 case 57:
return sim_float_to_ufixed(val, 13, 9);
274 case 58:
return 0.0f;
275 case 59:
return sim_float_to_ufixed(val, 10, 7);
276 case 60:
return sim_float_to_ufixed(val, 10, 6);
277 case 61:
return sim_float_to_ufixed(val, 8, 4);
278 case 62:
return sim_float_to_ufixed(val, 10, 6);
279 case 63:
return 0.0f;
292 if (p.nHidden.size() != 1 && p.nHidden.size() != p.nMLP) {
293 B2ERROR(
"Number of nHidden lists should be 1 or " << p.nMLP);
296 if (p.outputScale.size() != 1 && p.outputScale.size() != p.nMLP) {
297 B2ERROR(
"Number of outputScale lists should be 1 or " << p.nMLP);
301 unsigned short nTarget = int(p.targetresult);
303 B2ERROR(
"No outputs! Turn on targetresult.");
306 for (
unsigned iScale = 0; iScale < p.outputScale.size(); ++iScale) {
307 if (p.outputScale[iScale].size() != 2 * nTarget) {
308 B2ERROR(
"outputScale should be exactly " << 2 * nTarget <<
" values");
316 for (
unsigned iMLP = 0; iMLP < p.nMLP; ++iMLP) {
318 unsigned short nInput = p.i_cdc_sector[iMLP] + p.i_ecl_sector[iMLP];
319 vector<float> nhidden = p.nHidden[iMLP];
320 vector<unsigned short> nNodes = {nInput};
321 for (
unsigned iHid = 0; iHid < nhidden.size(); ++iHid) {
322 if (p.multiplyHidden) {
323 nNodes.push_back(nhidden[iHid] * nNodes[0]);
325 nNodes.push_back(nhidden[iHid]);
328 nNodes.push_back(nTarget);
329 unsigned short targetVars = int(p.targetresult);
330 vector<float> outputScale = (p.outputScale.size() == 1) ? p.outputScale[0] : p.outputScale[iMLP];
331 m_MLPs.push_back(
GRLMLP(nNodes, targetVars, outputScale));
336float sim_ap_fixed(
float val,
int total_bits = 12,
int int_bits = 12,
337 bool round =
true,
bool wrap =
true)
339 int frac_bits = total_bits - int_bits;
340 float scale = std::pow(2, frac_bits);
343 float scaled_val = val * scale;
344 int fixed_val = round ? std::round(scaled_val) :
std::floor(scaled_val);
346 int max_int = std::pow(2, total_bits) - 1;
351 raw_val = fixed_val & max_int;
354 raw_val = std::min(std::max(fixed_val, 0), max_int);
357 return raw_val / scale;
361float sim_fix_input_layer_t(
float val)
363 return sim_ap_fixed(val, 12, 12,
true,
true);
366std::vector<float> sim_dense_0_iq(
const std::vector<float>& input)
368 const std::vector<std::pair<int, int>> dense_0_iq_config = {
369 {12, 5}, {12, 4}, {10, 6}, {8, 4}, {8, 4}, {9, 5},
370 {6, 2}, {8, 3}, {6, 3}, {5, 4}, {7, 4}, {9, 5},
371 {8, 2}, {8, 2}, {6, 3}, {5, 3}, {8, 4}, {6, 2}
374 std::vector<float> output;
375 output.reserve(input.size());
376 for (
size_t i = 0; i < input.size(); ++i) {
377 int total_bits = dense_0_iq_config[i].first;
378 int int_bits = dense_0_iq_config[i].second;
379 output.push_back(sim_ap_fixed(input[i], total_bits, int_bits, RND, SAT_SYM));
394 vector<float> weights = expert.getWeights();
395 vector<float> bias = expert.getBias();
396 vector<float> layerinput = input;
398 vector<float> layeroutput2 = {};
399 vector<float> layeroutput3 = {};
400 vector<float> layeroutput4 = {};
403 for (
size_t i = 0; i < layerinput.size(); ++i) {
404 layerinput[i] = sim_fix_input_layer_t(layerinput[i]);
406 layeroutput2.clear();
407 layeroutput2.assign(expert.getNumberOfNodesLayer(2), 0.);
409 unsigned num_inputs = layerinput.size();
410 unsigned num_neurons = expert.getNumberOfNodesLayer(2);
411 for (
unsigned io = 0; io < num_neurons; ++io) {
412 float bias_raw = bias[io];
413 float bias_fixed = sim_fix_dense_0_bias_t(bias_raw);
414 float bias_contrib = sim_fix_dense_0_accum_t(bias_fixed);
415 layeroutput2[io] = bias_contrib;
420 for (
unsigned ii = 0; ii < num_inputs; ++ii) {
421 float input_val = layerinput[ii];
422 for (
unsigned io = 0; io < num_neurons; ++io) {
423 float weight_raw = weights[iw];
424 float weight_fixed = sim_fix_dense_0_weight_t(weight_raw);
425 float product = input_val * weight_fixed;
426 float contrib = sim_fix_dense_0_accum_t(product);
428 layeroutput2[io] += contrib;
437 std::vector<float> layeroutput2_fixed_relu(num_neurons);
439 for (
unsigned io = 0; io < num_neurons; ++io) {
441 float fixed_val = sim_fix_dense_0_t(layeroutput2[io]);
444 float relu_val = (fixed_val > 0) ? fixed_val : 0;
446 layeroutput2_fixed_relu[io] = relu_val;
450 std::vector<float> dense1_input(64);
451 for (
unsigned i = 0; i < 64; ++i) {
452 dense1_input[i] = sim_dense_1_input_quant(i, layeroutput2_fixed_relu[i]);
455 layeroutput3.clear();
456 layeroutput3.assign(expert.getNumberOfNodesLayer(1), 0.);
457 unsigned num_inputs_1 = layeroutput2_fixed_relu.size();
458 unsigned num_neurons_1 = expert.getNumberOfNodesLayer(2);
459 for (
unsigned io = 64; io < num_neurons_1 + 64; ++io) {
460 float bias_raw = bias[io];
461 float bias_fixed = sim_fix_dense_1_bias_t(bias_raw);
462 float bias_contrib = sim_fix_dense_1_accum_t(bias_fixed);
463 layeroutput3[io - 64] = bias_contrib;
468 for (
unsigned ii = 0; ii < num_inputs_1; ++ii) {
469 float input_val = dense1_input[ii];
470 for (
unsigned io = 0; io < num_neurons_1; ++io) {
472 float weight_raw = weights[iw];
474 float weight_fixed = sim_fix_dense_1_weight_t(weight_raw);
475 float product = input_val * weight_fixed;
476 float contrib = sim_fix_dense_1_accum_t(product);
478 layeroutput3[io] += contrib;
484 std::vector<float> layeroutput3_fixed_relu(num_neurons);
487 for (
unsigned io = 0; io < num_neurons_1; ++io) {
488 float fixed_val = sim_fix_dense_1_t(layeroutput3[io]);
490 float relu_val = (fixed_val > 0) ? fixed_val : 0;
492 layeroutput3_fixed_relu[io] = relu_val;
495 std::vector<float> dense2_input(64);
496 for (
unsigned i = 0; i < 64; ++i) {
497 dense2_input[i] = sim_dense_2_input_quant(i, layeroutput3_fixed_relu[i]);
499 layeroutput4.clear();
500 layeroutput4.assign(expert.getNumberOfNodesLayer(3), 0.);
502 unsigned num_inputs_2 = layeroutput2_fixed_relu.size();
503 unsigned num_neurons_2 = expert.getNumberOfNodesLayer(3);
504 for (
unsigned io = 128; io < num_neurons_2 + 128; ++io) {
505 float bias_raw = bias[io];
506 float bias_fixed = sim_fix_dense_2_bias_t(bias_raw);
507 float bias_contrib = sim_fix_dense_2_accum_t(bias_fixed);
508 layeroutput4[io - 128] = bias_contrib;
512 for (
unsigned ii = 0; ii < num_inputs_2; ++ii) {
513 float input_val = dense2_input[ii];
514 for (
unsigned io = 0; io < num_neurons_2; ++io) {
515 float weight_raw = weights[iw];
516 float weight_fixed = sim_fix_dense_2_weight_t(weight_raw);
517 float product = input_val * weight_fixed;
518 float contrib = sim_fix_dense_2_accum_t(product);
520 layeroutput4[io] += contrib;
525 return layeroutput4[0];
530bool GRLNeuro::load(
unsigned isector,
const string& weightfilename,
const string& biasfilename)
532 if (weightfilename.size() < 1) {
533 B2ERROR(
"Could not load Neurotrigger weights from database!");
535 }
else if (biasfilename.size() < 1) {
536 B2ERROR(
"Could not load Neurotrigger bias from database!");
539 std::ifstream wfile(weightfilename);
540 if (!wfile.is_open()) {
541 B2WARNING(
"Could not open file " << weightfilename);
544 std::ifstream bfile(biasfilename);
545 if (!bfile.is_open()) {
546 B2WARNING(
"Could not open file " << biasfilename);
550 std::vector<float> warray;
551 std::vector<float> barray;
556 while (wfile >> element) {
557 warray.push_back(element);
559 while (bfile >> element) {
560 barray.push_back(element);
563 if (warray.size() != expert.nWeightsCal()) {
564 B2ERROR(
"Number of weights is not equal to registered architecture!");
566 }
else expert.setWeights(warray);
567 if (barray.size() != expert.nBiasCal()) {
568 B2ERROR(
"Number of bias is not equal to registered architecture!");
572 expert.setWeights(warray);
573 expert.setBias(barray);
582bool GRLNeuro::load(
unsigned isector, std::vector<float> warray, std::vector<float> barray)
585 expert.setWeights(warray);
586 expert.setBias(barray);
Class to keep all parameters of an expert MLP for the neuro trigger.
void initialize(const Parameters &p)
Set parameters and get some network independent parameters.
bool load(unsigned isector, const std::string &wfilename, const std::string &bfilename)
Load MLPs from file.
float runMLP(unsigned isector, const std::vector< float > &input)
Run an expert MLP.
std::vector< GRLMLP > m_MLPs
List of networks.
Abstract base class for different kinds of events.
Struct to keep neurotrigger parameters.