1 #include <framework/logging/Logger.h>
2 #include <trg/grl/GRLNeuro.h>
3 #include <trg/grl/dataobjects/GRLMLP.h>
5 #include <cdc/geometry/CDCGeometryPar.h>
6 #include <framework/gearbox/Const.h>
7 #include <framework/gearbox/Unit.h>
8 #include <framework/datastore/StoreObjPtr.h>
9 #include <framework/datastore/StoreArray.h>
10 #include <trg/cdc/dbobjects/CDCTriggerNeuroConfig.h>
11 #include <trg/cdc/dataobjects/CDCTriggerTrack.h>
// ---- Fragment: network-parameter consistency checks ----
// NOTE(review): this chunk is an incomplete numbered listing (the enclosing
// function signature, closing braces and some lines are not visible), so the
// comments below describe only what the visible code shows.
// A per-network parameter list may be given either once (shared by all
// p.nMLP networks) or once per network; any other count is rejected.
26 if (p.nHidden.size() != 1 && p.nHidden.size() != p.nMLP) {
27 B2ERROR(
"Number of nHidden lists should be 1 or " << p.nMLP);
// Same "1 or nMLP" rule applies to the output scaling ranges.
30 if (p.outputScale.size() != 1 && p.outputScale.size() != p.nMLP) {
31 B2ERROR(
"Number of outputScale lists should be 1 or " << p.nMLP);
// Number of output (target) nodes; p.targetresult is converted via int(),
// so it presumably is a bool/flag enabling the single training target
// -- TODO confirm against the Parameters definition.
35 unsigned short nTarget = int(p.targetresult);
// NOTE(review): the condition guarding this error (presumably nTarget == 0)
// is on a line not visible in this chunk.
37 B2ERROR(
"No outputs! Turn on targetresult.");
// Each output-scale list must provide a (low, high) pair per output node,
// i.e. exactly 2 * nTarget values.
40 for (
unsigned iScale = 0; iScale < p.outputScale.size(); ++iScale) {
41 if (p.outputScale[iScale].size() != 2 * nTarget) {
42 B2ERROR(
"outputScale should be exactly " << 2 * nTarget <<
" values");
// ---- Fragment: construct one GRLMLP per network/sector ----
// NOTE(review): incomplete listing; some brace/else lines are not visible.
51 for (
unsigned iMLP = 0; iMLP < p.nMLP; ++iMLP) {
// Input-layer size: CDC inputs plus ECL inputs for this sector.
56 unsigned short nInput = p.i_cdc_sector[iMLP] + p.i_ecl_sector[iMLP];
// Hidden-layer spec: shared list (size 1) or the per-network list.
57 vector<float> nHidden = (p.nHidden.size() == 1) ? p.nHidden[0] : p.nHidden[iMLP];
// nNodes collects the node count of every layer, starting with the input layer.
58 vector<unsigned short> nNodes = {nInput};
59 for (
unsigned iHid = 0; iHid < nHidden.size(); ++iHid) {
// multiplyHidden: hidden sizes are given relative to the input-layer
// size (nNodes[0]) instead of as absolute node counts.
60 if (p.multiplyHidden) {
61 nNodes.push_back(nHidden[iHid] * nNodes[0]);
// NOTE(review): this push_back is presumably the else branch (absolute
// node count); the intervening line(s) are not visible in this chunk.
63 nNodes.push_back(nHidden[iHid]);
// Output layer: one node per target variable.
66 nNodes.push_back(nTarget);
67 unsigned short targetVars = int(p.targetresult);
// Output scaling: shared (size 1) or per-network.
69 vector<float> outputScale = (p.outputScale.size() == 1) ? p.outputScale[0] : p.outputScale[iMLP];
// Store the freshly configured network.
71 m_MLPs.push_back(
GRLMLP(nNodes, targetVars, outputScale));
// ---- Fragment: floating-point feed-forward evaluation of one network ----
// Select the expert network for this sector and propagate the input vector
// layer by layer. NOTE(review): incomplete listing; loop-closing braces and
// some declarations are not visible here.
108 const GRLMLP& expert = m_MLPs[isector];
109 vector<float> weights = expert.getWeights();
110 vector<float> layerinput = input;
111 vector<float> layeroutput = {};
// Loop over layers; layer 0 is the input layer, so start at 1.
113 for (
unsigned il = 1; il < expert.nLayers(); ++il) {
// Append the bias node (constant 1) to the previous layer's output.
115 layerinput.push_back(1.);
118 layeroutput.assign(expert.nNodesLayer(il), 0.);
// Weighted sum over all inputs (incl. bias) for each node of this layer.
// NOTE(review): the running weight index iw is declared on a line not
// visible in this chunk; it walks linearly through the flat weight vector.
120 for (
unsigned io = 0; io < layeroutput.size(); ++io) {
122 for (
unsigned ii = 0; ii < layerinput.size(); ++ii) {
123 layeroutput[io] += layerinput[ii] * weights[iw++];
// Activation function: tanh(x / 2).
126 layeroutput[io] = tanh(layeroutput[io] / 2.);
// This layer's output becomes the next layer's input.
129 layerinput = layeroutput;
// ---- Fragment: fixed-point feed-forward evaluation (hardware emulation) ----
// NOTE(review): incomplete listing; loop-closing braces and the declaration
// of the running weight index iw are not visible here.
// Fixed-point bit widths; presumably m_precision[3..5] hold the input,
// weight and tanh-LUT input precisions -- TODO confirm where m_precision
// is filled. Note precisionTanh deliberately reuses m_precision[3].
137 unsigned precisionInput = m_precision[3];
138 unsigned precisionWeights = m_precision[4];
139 unsigned precisionLUT = m_precision[5];
140 unsigned precisionTanh = m_precision[3];
// Right-shift that reduces an (input x weight) product to LUT input precision.
141 unsigned dp = precisionInput + precisionWeights - precisionLUT;
143 const GRLMLP& expert = m_MLPs[isector];
// Quantize the inputs: value * 2^precisionInput, truncated toward zero.
145 vector<long> inputFix(input.size(), 0);
146 for (
unsigned ii = 0; ii < input.size(); ++ii) {
147 inputFix[ii] = long(input[ii] * (1 << precisionInput));
// Quantize the weights: value * 2^precisionWeights, rounded to nearest.
150 vector<float> weights = expert.getWeights();
151 vector<long> weightsFix(weights.size(), 0);
152 for (
unsigned iw = 0; iw < weights.size(); ++iw) {
153 weightsFix[iw] = long(round(weights[iw] * (1 << precisionWeights)));
// Saturation bound of the tanh LUT: beyond xMax the quantized tanh output
// is indistinguishable from 1 at the chosen output precision.
156 unsigned xMax = unsigned(ceil(atanh(1. - 1. / (1 << (precisionTanh + 1))) *
157 (1 << (precisionLUT + 1))));
160 vector<long> layerinput = inputFix;
161 vector<long> layeroutput = {};
// Loop over layers; layer 0 is the input layer.
163 for (
unsigned il = 1; il < expert.nLayers(); ++il) {
// Bias node: fixed-point representation of 1.
165 layerinput.push_back(1 << precisionInput);
168 layeroutput.assign(expert.nNodesLayer(il), 0);
// Weighted sum in integer arithmetic; iw indexes the flat weight vector.
170 for (
unsigned io = 0; io < layeroutput.size(); ++io) {
172 for (
unsigned ii = 0; ii < layerinput.size(); ++ii) {
173 layeroutput[io] += layerinput[ii] * weightsFix[iw++];
// LUT-emulated tanh activation, computed on the magnitude of the sum.
176 unsigned long bin = abs(layeroutput[io]) >> dp;
// x: bin center mapped back to the real-valued LUT input scale.
178 float x = (bin + 0.5 - 1. / (1 << (dp + 1))) / (1 << precisionLUT);
// Inside the LUT range use quantized tanh(x/2); outside saturate at 1.
179 long tanhLUT = (bin < xMax) ?
long(round(tanh(x / 2.) * (1 << precisionTanh))) : (1 << precisionTanh);
// Restore the sign (tanh is an odd function).
180 layeroutput[io] = (layeroutput[io] < 0) ? -tanhLUT : tanhLUT;
// This layer's output becomes the next layer's input.
183 layerinput = layeroutput;
// Convert the final fixed-point outputs back to float.
187 vector<float> output(layeroutput.size(), 0.);
188 for (
unsigned io = 0; io < output.size(); ++io) {
189 output[io] = layeroutput[io] / float(1 << precisionTanh);
// ---- Fragment: persist all networks to a ROOT file ----
197 B2INFO(
"Saving networks to file " << filename <<
", array " << arrayname);
// "UPDATE": open an existing file for modification (or create it) rather
// than recreating it, so other keys in the file are preserved.
198 TFile datafile(filename.c_str(),
"UPDATE");
// NOTE(review): the TObjArray is allocated with new and only holds
// pointers into m_MLPs (it does not own them); whether/how the array is
// deleted afterwards happens on lines not visible in this chunk.
199 TObjArray* MLPs =
new TObjArray(m_MLPs.size());
200 for (
unsigned isector = 0; isector < m_MLPs.size(); ++isector) {
201 MLPs->Add(&m_MLPs[isector]);
// kSingleKey: write the whole array under one key; kOverwrite: replace any
// previous cycle of the same key instead of adding a new one.
203 MLPs->Write(arrayname.c_str(), TObject::kSingleKey | TObject::kOverwrite);