#include <framework/logging/Logger.h>
#include <trg/grl/GRLNeuro.h>
#include <trg/grl/dataobjects/GRLMLP.h>
#include <cdc/geometry/CDCGeometryPar.h>
#include <framework/gearbox/Const.h>
#include <framework/gearbox/Unit.h>
#include <framework/datastore/StoreObjPtr.h>
#include <framework/datastore/StoreArray.h>
#include <trg/cdc/dbobjects/CDCTriggerNeuroConfig.h>
#include <trg/cdc/dataobjects/CDCTriggerTrack.h>

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <fstream>
#include <utility>
#include <vector>

using namespace Belle2;
using namespace std;
// Simulate quantisation of a float to a fixed-point value with total_bits bits,
// int_bits of them integer bits.  round_mode != 0 rounds to nearest (otherwise
// truncates); overflow_mode == 2 wraps around (otherwise saturates).
float sim_fixed(float val, int total_bits, int int_bits, bool is_signed = true,
                int round_mode = 1, int overflow_mode = 0)
{
  int frac_bits = total_bits - int_bits;
  float scale = std::pow(2.0f, frac_bits);
  float scaled_val = val * scale;

  int64_t fixed_val;
  if (round_mode != 0) {
    fixed_val = static_cast<int64_t>(std::round(scaled_val));
  } else {
    fixed_val = static_cast<int64_t>(std::trunc(scaled_val));
  }

  int64_t max_val, min_val;
  if (is_signed) {
    max_val = (1LL << (total_bits - 1)) - 1;
    min_val = -(1LL << (total_bits - 1));
  } else {
    max_val = (1LL << total_bits) - 1;
    min_val = 0;
  }

  if (fixed_val > max_val || fixed_val < min_val) {
    if (overflow_mode == 2) {
      // wrap around the representable range
      if (is_signed) {
        int64_t mod = 1LL << total_bits;
        fixed_val = (fixed_val + mod) % mod;
        if (fixed_val >= (1LL << (total_bits - 1)))
          fixed_val -= (1LL << total_bits);
      } else {
        fixed_val = fixed_val % (1LL << total_bits);
      }
    } else {
      // saturate at the representable range
      fixed_val = std::min(std::max(fixed_val, min_val), max_val);
    }
  }
  fixed_val = std::min(fixed_val, max_val);
  fixed_val = std::max(fixed_val, min_val);

  return static_cast<float>(fixed_val) / scale;
}
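// Worked example (illustrative only): a signed <10, 2> type, as used for the
// dense_0 weights below, has 8 fractional bits, i.e. a resolution of 1/256 and
// a saturation range of [-2.0, +1.99609375].  For instance,
//   sim_fixed(0.3f, 10, 2);   // -> 77/256 = 0.30078125
//   sim_fixed(3.5f, 10, 2);   // -> saturates to 511/256 = 1.99609375
// The sim_fix_*_t helpers below simply wrap sim_fixed with the
// (total_bits, int_bits) pair of the corresponding layer quantity.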
inline float sim_fix_dense_0_accum_t(float x)      { return sim_fixed(x, 24, 16); }
inline float sim_fix_dense_0_t(float x)            { return sim_fixed(x, 20, 16); }
inline float sim_fix_dense_0_weight_t(float x)     { return sim_fixed(x, 10, 2); }
inline float sim_fix_dense_0_bias_t(float x)       { return sim_fixed(x, 5, 1); }

inline float sim_fix_dense_0_relu_t(float x)       { return sim_fixed(x, 15, 11, false); }
inline float sim_fix_dense_0_relu_table_t(float x) { return sim_fixed(x, 18, 8); }

inline float sim_fix_dense_1_iq_t(float x)         { return sim_fixed(x, 14, 11, false); }
inline float sim_fix_dense_1_accum_t(float x)      { return sim_fixed(x, 23, 14); }
inline float sim_fix_dense_1_t(float x)            { return sim_fixed(x, 19, 14); }
inline float sim_fix_dense_1_weight_t(float x)     { return sim_fixed(x, 8, 2); }
inline float sim_fix_dense_1_bias_t(float x)       { return sim_fixed(x, 5, 1); }

inline float sim_fix_dense_1_relu_t(float x)       { return sim_fixed(x, 15, 10, false); }
inline float sim_fix_dense_1_relu_table_t(float x) { return sim_fixed(x, 18, 8); }

inline float sim_fix_dense_2_iq_t(float x)         { return sim_fixed(x, 14, 10, false); }
inline float sim_fix_dense_2_accum_t(float x)      { return sim_fixed(x, 19, 10); }
inline float sim_fix_dense_2_weight_t(float x)     { return sim_fixed(x, 8, 1); }
inline float sim_fix_dense_2_bias_t(float x)       { return sim_fixed(x, 1, 0, false); }

inline float sim_fix_result_t(float x)             { return sim_fixed(x, 15, 8, true, 1, 2); }
// Simulate quantisation of a float to an unsigned fixed-point value with
// W total bits and I integer bits (round to nearest, saturate).
float sim_float_to_ufixed(float val, int W, int I)
{
  int F = W - I;  // number of fractional bits
  float scale = std::pow(2.0f, F);
  float rounded = std::round(val * scale) / scale;

  float max_val = std::pow(2.0f, I) - 1.0f / scale;
  float min_val = 0.0f;

  if (rounded > max_val) return max_val;
  if (rounded < min_val) return min_val;
  return rounded;
}
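// Worked example (illustrative only): W may be smaller than I, in which case
// the fractional bit count is negative and the quantisation step is larger
// than 1.  E.g. sim_float_to_ufixed(val, 7, 8) has F = -1, so values are
// rounded to even integers and saturated to [0, 254], while
// sim_float_to_ufixed(val, 14, 11) has F = 3, i.e. a step of 0.125 and a
// maximum of 2047.875.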
// Per-channel input quantisation in front of dense layer 1:
// each of the 64 channels has its own unsigned fixed-point format.
float sim_dense_1_input_quant(unsigned index, float val)
{
  switch (index) {
    case 0:  return sim_float_to_ufixed(val, 7, 8);
    case 1:  return sim_float_to_ufixed(val, 12, 9);
    case 2:  return sim_float_to_ufixed(val, 14, 11);
    case 3:  return sim_float_to_ufixed(val, 11, 9);
    case 4:  return sim_float_to_ufixed(val, 7, 7);
    case 5:  return sim_float_to_ufixed(val, 4, 9);
    case 6:  return sim_float_to_ufixed(val, 13, 10);
    case 7:  return sim_float_to_ufixed(val, 10, 8);
    case 8:  return sim_float_to_ufixed(val, 9, 8);
    case 9:  return sim_float_to_ufixed(val, 8, 8);
    case 10: return sim_float_to_ufixed(val, 8, 7);
    case 11: return sim_float_to_ufixed(val, 12, 10);
    case 12: return sim_float_to_ufixed(val, 7, 7);
    case 13: return sim_float_to_ufixed(val, 6, 9);
    case 14: return sim_float_to_ufixed(val, 11, 10);
    case 15: return sim_float_to_ufixed(val, 9, 7);
    case 16: return sim_float_to_ufixed(val, 14, 11);
    case 17: return sim_float_to_ufixed(val, 13, 10);
    case 18: return sim_float_to_ufixed(val, 8, 11);
    case 19: return sim_float_to_ufixed(val, 10, 8);
    case 20: return sim_float_to_ufixed(val, 7, 7);
    case 21: return sim_float_to_ufixed(val, 12, 10);
    case 22: return sim_float_to_ufixed(val, 7, 8);
    case 23: return sim_float_to_ufixed(val, 4, 7);
    case 24: return sim_float_to_ufixed(val, 13, 10);
    case 25: return sim_float_to_ufixed(val, 6, 5);
    case 26: return sim_float_to_ufixed(val, 12, 10);
    case 27: return sim_float_to_ufixed(val, 7, 9);
    case 28: return sim_float_to_ufixed(val, 11, 10);
    case 29: return sim_float_to_ufixed(val, 12, 10);
    case 30: return sim_float_to_ufixed(val, 13, 10);
    case 31: return sim_float_to_ufixed(val, 11, 10);
    case 32: return sim_float_to_ufixed(val, 12, 10);
    case 33: return sim_float_to_ufixed(val, 12, 9);
    case 34: return sim_float_to_ufixed(val, 14, 11);
    case 35: return sim_float_to_ufixed(val, 12, 10);
    case 36: return sim_float_to_ufixed(val, 12, 10);
    case 37: return sim_float_to_ufixed(val, 10, 10);
    case 38: return sim_float_to_ufixed(val, 11, 10);
    case 39: return sim_float_to_ufixed(val, 13, 10);
    case 40: return sim_float_to_ufixed(val, 10, 10);
    case 41: return sim_float_to_ufixed(val, 7, 9);
    case 42: return sim_float_to_ufixed(val, 11, 10);
    case 43: return sim_float_to_ufixed(val, 7, 8);
    case 44: return sim_float_to_ufixed(val, 10, 8);
    case 45: return 0.0f;
    case 46: return sim_float_to_ufixed(val, 7, 8);
    case 47: return sim_float_to_ufixed(val, 5, 7);
    case 48: return 0.0f;
    case 49: return sim_float_to_ufixed(val, 11, 11);
    case 50: return sim_float_to_ufixed(val, 13, 10);
    case 51: return sim_float_to_ufixed(val, 7, 8);
    case 52: return sim_float_to_ufixed(val, 11, 11);
    case 53: return sim_float_to_ufixed(val, 14, 11);
    case 54: return sim_float_to_ufixed(val, 7, 7);
    case 55: return sim_float_to_ufixed(val, 3, 7);
    case 56: return sim_float_to_ufixed(val, 8, 9);
    case 57: return sim_float_to_ufixed(val, 8, 8);
    case 58: return sim_float_to_ufixed(val, 13, 10);
    case 59: return sim_float_to_ufixed(val, 11, 9);
    case 60: return sim_float_to_ufixed(val, 11, 9);
    case 61: return sim_float_to_ufixed(val, 11, 8);
    case 62: return sim_float_to_ufixed(val, 10, 9);
    case 63: return sim_float_to_ufixed(val, 7, 9);
    default: return 0.0f;
  }
}
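// These per-channel formats are applied in runMLP() between the ReLU output of
// dense layer 0 and the matrix multiplication of dense layer 1.  Channels whose
// case returns 0.0f unconditionally (45 and 48 above) contribute nothing to the
// next layer, presumably because they were pruned when the fixed-point formats
// were derived.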
// Per-channel input quantisation in front of dense layer 2.
float sim_dense_2_input_quant(unsigned index, float val)
{
  switch (index) {
    case 0:  return sim_float_to_ufixed(val, 3, -1);
    case 1:  return sim_float_to_ufixed(val, 13, 9);
    // (case 2 not listed here; it falls through to the default below)
    case 3:  return sim_float_to_ufixed(val, 10, 6);
    case 4:  return sim_float_to_ufixed(val, 9, 5);
    case 5:  return sim_float_to_ufixed(val, 10, 6);
    case 6:  return sim_float_to_ufixed(val, 8, 6);
    case 7:  return sim_float_to_ufixed(val, 14, 10);
    // (cases 8 and 9 not listed here; they fall through to the default below)
    case 10: return 0.0f;
    case 11: return sim_float_to_ufixed(val, 10, 6);
    case 12: return sim_float_to_ufixed(val, 9, 5);
    case 13: return sim_float_to_ufixed(val, 1, 5);
    case 14: return sim_float_to_ufixed(val, 8, 5);
    case 15: return 0.0f;
    case 16: return sim_float_to_ufixed(val, 9, 5);
    case 17: return sim_float_to_ufixed(val, 13, 9);
    case 18: return sim_float_to_ufixed(val, 10, 6);
    case 19: return sim_float_to_ufixed(val, 10, 7);
    case 20: return sim_float_to_ufixed(val, 7, 3);
    case 21: return 0.0f;
    case 22: return sim_float_to_ufixed(val, 11, 7);
    case 23: return sim_float_to_ufixed(val, 10, 6);
    case 24: return sim_float_to_ufixed(val, 11, 7);
    case 25: return 0.0f;
    case 26: return sim_float_to_ufixed(val, 10, 6);
    case 27: return 0.0f;
    case 28: return 0.0f;
    case 29: return sim_float_to_ufixed(val, 9, 5);
    case 30: return sim_float_to_ufixed(val, 3, 4);
    case 31: return sim_float_to_ufixed(val, 10, 7);
    case 32: return 0.0f;
    case 33: return sim_float_to_ufixed(val, 10, 7);
    case 34: return 0.0f;
    case 35: return sim_float_to_ufixed(val, 8, 7);
    case 36: return sim_float_to_ufixed(val, 7, 3);
    case 37: return sim_float_to_ufixed(val, 9, 5);
    case 38: return sim_float_to_ufixed(val, 11, 7);
    case 39: return sim_float_to_ufixed(val, 4, 5);
    case 40: return sim_float_to_ufixed(val, 10, 6);
    case 41: return sim_float_to_ufixed(val, 10, 6);
    case 42: return sim_float_to_ufixed(val, 12, 9);
    case 43: return sim_float_to_ufixed(val, 10, 6);
    case 44: return sim_float_to_ufixed(val, 6, 6);
    case 45: return sim_float_to_ufixed(val, 7, 3);
    case 46: return sim_float_to_ufixed(val, 10, 6);
    case 47: return sim_float_to_ufixed(val, 9, 5);
    case 48: return sim_float_to_ufixed(val, 7, 4);
    case 49: return sim_float_to_ufixed(val, 10, 6);
    case 50: return sim_float_to_ufixed(val, 13, 9);
    case 51: return 0.0f;
    case 52: return sim_float_to_ufixed(val, 9, 5);
    case 53: return sim_float_to_ufixed(val, 10, 8);
    case 54: return 0.0f;
    case 55: return sim_float_to_ufixed(val, 9, 5);
    case 56: return sim_float_to_ufixed(val, 9, 5);
    case 57: return sim_float_to_ufixed(val, 13, 9);
    case 58: return 0.0f;
    case 59: return sim_float_to_ufixed(val, 10, 7);
    case 60: return sim_float_to_ufixed(val, 10, 6);
    case 61: return sim_float_to_ufixed(val, 8, 4);
    case 62: return sim_float_to_ufixed(val, 10, 6);
    case 63: return 0.0f;
    default: return 0.0f;
  }
}
void GRLNeuro::initialize(const Parameters& p)
{
  // check parameter consistency
  if (p.nHidden.size() != 1 && p.nHidden.size() != p.nMLP) {
    B2ERROR("Number of nHidden lists should be 1 or " << p.nMLP);
  }
  if (p.outputScale.size() != 1 && p.outputScale.size() != p.nMLP) {
    B2ERROR("Number of outputScale lists should be 1 or " << p.nMLP);
  }

  unsigned short nTarget = int(p.targetresult);
  if (nTarget < 1) {
    B2ERROR("No outputs! Turn on targetresult.");
  }

  for (unsigned iScale = 0; iScale < p.outputScale.size(); ++iScale) {
    if (p.outputScale[iScale].size() != 2 * nTarget) {
      B2ERROR("outputScale should be exactly " << 2 * nTarget << " values");
    }
  }

  // create MLPs
  for (unsigned iMLP = 0; iMLP < p.nMLP; ++iMLP) {
    // network structure: input nodes, hidden layers, output nodes
    unsigned short nInput = p.i_cdc_sector[iMLP] + p.i_ecl_sector[iMLP];
    vector<float> nhidden = p.nHidden[iMLP];
    vector<unsigned short> nNodes = {nInput};
    for (unsigned iHid = 0; iHid < nhidden.size(); ++iHid) {
      if (p.multiplyHidden) {
        nNodes.push_back(nhidden[iHid] * nNodes[0]);
      } else {
        nNodes.push_back(nhidden[iHid]);
      }
    }
    nNodes.push_back(nTarget);
    unsigned short targetVars = int(p.targetresult);
    vector<float> outputScale = (p.outputScale.size() == 1) ? p.outputScale[0] : p.outputScale[iMLP];
    m_MLPs.push_back(GRLMLP(nNodes, targetVars, outputScale));
  }
}
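// Hypothetical configuration sketch (field values invented for illustration;
// only the field names appear in initialize() above): a single network with
// 32 CDC + 32 ECL inputs, one hidden layer of 64 nodes and one output could be
// described by
//   Parameters p;
//   p.nMLP = 1;
//   p.i_cdc_sector = {32};  p.i_ecl_sector = {32};
//   p.nHidden = {{64}};     p.multiplyHidden = false;
//   p.targetresult = true;                 // one target -> nTarget = 1
//   p.outputScale = {{-1., 1.}};           // 2 * nTarget scale values
//   grlNeuro.initialize(p);                // grlNeuro: a GRLNeuro instance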
// Simulate ap_fixed-style quantisation with selectable rounding (round vs.
// floor) and overflow handling (wrap vs. saturate to the unsigned range).
float sim_ap_fixed(float val, int total_bits = 12, int int_bits = 12,
                   bool round = true, bool wrap = true)
{
  int frac_bits = total_bits - int_bits;
  float scale = std::pow(2, frac_bits);

  float scaled_val = val * scale;
  int fixed_val = round ? static_cast<int>(std::round(scaled_val))
                        : static_cast<int>(std::floor(scaled_val));

  int max_int = std::pow(2, total_bits) - 1;

  int raw_val;
  if (wrap) {
    // keep only the lowest total_bits bits
    raw_val = fixed_val & max_int;
  } else {
    // saturate to [0, max_int]
    raw_val = std::min(std::max(fixed_val, 0), max_int);
  }

  return raw_val / scale;
}
float sim_fix_input_layer_t(float val)
{
  return sim_ap_fixed(val, 12, 12, true, true);
}
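// With total_bits == int_bits there are no fractional bits, so the input-layer
// quantisation above rounds each value to the nearest integer and, in wrap
// mode, keeps only its lowest 12 bits (range 0..4095).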
// Per-channel ap_fixed quantisation of the raw network inputs
// (one <total_bits, int_bits> pair per input channel).
std::vector<float> sim_dense_0_iq(const std::vector<float>& input)
{
  const std::vector<std::pair<int, int>> dense_0_iq_config = {
    {12, 5}, {12, 4}, {10, 6}, {8, 4}, {8, 4}, {9, 5},
    {6, 2}, {8, 3}, {6, 3}, {5, 4}, {7, 4}, {9, 5},
    {8, 2}, {8, 2}, {6, 3}, {5, 3}, {8, 4}, {6, 2}
  };

  std::vector<float> output;
  output.reserve(input.size());
  for (size_t i = 0; i < input.size(); ++i) {
    int total_bits = dense_0_iq_config[i].first;
    int int_bits = dense_0_iq_config[i].second;
    output.push_back(sim_ap_fixed(input[i], total_bits, int_bits, true, true));
  }
  return output;
}
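// Note: the table above lists 18 formats, one per raw input channel, so the
// loop assumes input.size() does not exceed the table size; larger inputs
// would index past the end of dense_0_iq_config.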
float GRLNeuro::runMLP(unsigned isector, const std::vector<float>& input)
{
  const GRLMLP& expert = m_MLPs[isector];
  vector<float> weights = expert.getWeights();
  vector<float> bias = expert.getBias();
  vector<float> layerinput = input;

  vector<float> layeroutput2 = {};
  vector<float> layeroutput3 = {};
  vector<float> layeroutput4 = {};

  // quantise the network inputs to the input-layer fixed-point type
  for (size_t i = 0; i < layerinput.size(); ++i) {
    layerinput[i] = sim_fix_input_layer_t(layerinput[i]);
  }

  // ----- dense layer 0 -----
  layeroutput2.clear();
  layeroutput2.assign(expert.getNumberOfNodesLayer(2), 0.);

  unsigned num_inputs = layerinput.size();
  unsigned num_neurons = expert.getNumberOfNodesLayer(2);
  // start each accumulator from the quantised bias
  for (unsigned io = 0; io < num_neurons; ++io) {
    float bias_raw = bias[io];
    float bias_fixed = sim_fix_dense_0_bias_t(bias_raw);
    float bias_contrib = sim_fix_dense_0_accum_t(bias_fixed);
    layeroutput2[io] = bias_contrib;
  }

  // accumulate the weighted, quantised inputs;
  // iw runs over the flattened weight vector across all layers
  unsigned iw = 0;
  for (unsigned ii = 0; ii < num_inputs; ++ii) {
    float input_val = layerinput[ii];
    for (unsigned io = 0; io < num_neurons; ++io) {
      float weight_raw = weights[iw];
      float weight_fixed = sim_fix_dense_0_weight_t(weight_raw);
      float product = input_val * weight_fixed;
      float contrib = sim_fix_dense_0_accum_t(product);
      layeroutput2[io] += contrib;
      ++iw;
    }
  }

  // quantise the accumulator to the layer output type and apply ReLU
  std::vector<float> layeroutput2_fixed_relu(num_neurons);
  for (unsigned io = 0; io < num_neurons; ++io) {
    float fixed_val = sim_fix_dense_0_t(layeroutput2[io]);
    float relu_val = (fixed_val > 0) ? fixed_val : 0;
    layeroutput2_fixed_relu[io] = relu_val;
  }

  // per-channel input quantisation for dense layer 1
  std::vector<float> dense1_input(64);
  for (unsigned i = 0; i < 64; ++i) {
    dense1_input[i] = sim_dense_1_input_quant(i, layeroutput2_fixed_relu[i]);
  }

  // ----- dense layer 1 -----
  layeroutput3.clear();
  layeroutput3.assign(expert.getNumberOfNodesLayer(1), 0.);
  unsigned num_inputs_1 = layeroutput2_fixed_relu.size();
  unsigned num_neurons_1 = expert.getNumberOfNodesLayer(2);
  // biases of this layer start at offset 64 in the flattened bias vector
  for (unsigned io = 64; io < num_neurons_1 + 64; ++io) {
    float bias_raw = bias[io];
    float bias_fixed = sim_fix_dense_1_bias_t(bias_raw);
    float bias_contrib = sim_fix_dense_1_accum_t(bias_fixed);
    layeroutput3[io - 64] = bias_contrib;
  }

  for (unsigned ii = 0; ii < num_inputs_1; ++ii) {
    float input_val = dense1_input[ii];
    for (unsigned io = 0; io < num_neurons_1; ++io) {
      float weight_raw = weights[iw];
      float weight_fixed = sim_fix_dense_1_weight_t(weight_raw);
      float product = input_val * weight_fixed;
      float contrib = sim_fix_dense_1_accum_t(product);
      layeroutput3[io] += contrib;
      ++iw;
    }
  }

  // quantise the accumulator and apply ReLU
  std::vector<float> layeroutput3_fixed_relu(num_neurons);
  for (unsigned io = 0; io < num_neurons_1; ++io) {
    float fixed_val = sim_fix_dense_1_t(layeroutput3[io]);
    float relu_val = (fixed_val > 0) ? fixed_val : 0;
    layeroutput3_fixed_relu[io] = relu_val;
  }

  // per-channel input quantisation for dense layer 2
  std::vector<float> dense2_input(64);
  for (unsigned i = 0; i < 64; ++i) {
    dense2_input[i] = sim_dense_2_input_quant(i, layeroutput3_fixed_relu[i]);
  }

  // ----- dense layer 2 (output layer) -----
  layeroutput4.clear();
  layeroutput4.assign(expert.getNumberOfNodesLayer(3), 0.);

  unsigned num_inputs_2 = layeroutput2_fixed_relu.size();
  unsigned num_neurons_2 = expert.getNumberOfNodesLayer(3);
  // biases of this layer start at offset 128 in the flattened bias vector
  for (unsigned io = 128; io < num_neurons_2 + 128; ++io) {
    float bias_raw = bias[io];
    float bias_fixed = sim_fix_dense_2_bias_t(bias_raw);
    float bias_contrib = sim_fix_dense_2_accum_t(bias_fixed);
    layeroutput4[io - 128] = bias_contrib;
  }

  for (unsigned ii = 0; ii < num_inputs_2; ++ii) {
    float input_val = dense2_input[ii];
    for (unsigned io = 0; io < num_neurons_2; ++io) {
      float weight_raw = weights[iw];
      float weight_fixed = sim_fix_dense_2_weight_t(weight_raw);
      float product = input_val * weight_fixed;
      float contrib = sim_fix_dense_2_accum_t(product);
      layeroutput4[io] += contrib;
      ++iw;
    }
  }

  return layeroutput4[0];
}
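// Usage sketch (illustrative; the calling module is not shown here): once
// initialize() and load() have been called for sector 0, a single quantised
// network evaluation looks like
//   std::vector<float> input = ...;      // one float per input channel
//   float response = runMLP(0, input);
// All arithmetic inside runMLP goes through the sim_* helpers, so the result
// is meant to reproduce the fixed-point behaviour of the firmware rather than
// a full-precision MLP evaluation.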
bool GRLNeuro::load(unsigned isector, const string& weightfilename, const string& biasfilename)
{
  if (weightfilename.size() < 1) {
    B2ERROR("Could not load Neurotrigger weights from database!");
    return false;
  } else if (biasfilename.size() < 1) {
    B2ERROR("Could not load Neurotrigger bias from database!");
    return false;
  } else {
    std::ifstream wfile(weightfilename);
    if (!wfile.is_open()) {
      B2WARNING("Could not open file " << weightfilename);
      return false;
    }
    std::ifstream bfile(biasfilename);
    if (!bfile.is_open()) {
      B2WARNING("Could not open file " << biasfilename);
      return false;
    }

    GRLMLP& expert = m_MLPs[isector];
    std::vector<float> warray;
    std::vector<float> barray;

    // the files contain whitespace-separated float values
    float element;
    while (wfile >> element) {
      warray.push_back(element);
    }
    while (bfile >> element) {
      barray.push_back(element);
    }

    if (warray.size() != expert.nWeightsCal()) {
      B2ERROR("Number of weights is not equal to registered architecture!");
      return false;
    } else expert.setWeights(warray);

    if (barray.size() != expert.nBiasCal()) {
      B2ERROR("Number of bias is not equal to registered architecture!");
      return false;
    }
    expert.setWeights(warray);
    expert.setBias(barray);

    return true;
  }
}
bool GRLNeuro::load(unsigned isector, std::vector<float> warray, std::vector<float> barray)
{
  GRLMLP& expert = m_MLPs[isector];
  expert.setWeights(warray);
  expert.setBias(barray);
  return true;
}