9#include "trg/grl/modules/trggrlneuralnet/GRLNeuroTrainerModule.h"
11#include <parallel_fann.hpp>
16#include <framework/datastore/StoreArray.h>
17#include <framework/datastore/StoreObjPtr.h>
18#include <mdst/dataobjects/MCParticle.h>
19#include <tracking/dataobjects/RecoTrack.h>
20#include <framework/datastore/StoreObjPtr.h>
21#include <framework/dataobjects/EventMetaData.h>
22#include <framework/core/ModuleParam.templateDetails.h>
23#include <analysis/utility/PCmsLabTransform.h>
24#include <trg/cdc/dataobjects/CDCTriggerTrack.h>
25#include <trg/ecl/TrgEclMapping.h>
26#include <trg/ecl/dataobjects/TRGECLCluster.h>
27#include <trg/ecl/dataobjects/TRGECLTrg.h>
28#include <mdst/dataobjects/SoftwareTriggerResult.h>
29#include <trg/grl/dataobjects/GRLMLPData.h>
30#include "trg/grl/dataobjects/TRGGRLUnpackerStore.h"
32#include <cdc/geometry/CDCGeometryPar.h>
33#include <framework/gearbox/Unit.h>
49 "The NeuroTriggerTrainer module of the GRL.\n"
50 "Takes CDC track and ECL cluster to prepare input data\n"
51 "for the training of a neural network.\n"
52 "Networks are trained after the event loop and saved."
56 "Name of the StoreArray holding the information of trigger ecl clusters ",
57 string(
"TRGECLClusters"));
59 "Name of the StoreArray holding the tracks made by the 2D finder to be used as input.",
60 string(
"TRGCDC2DFinderTracks"));
62 "Name of the StoreArray holding the tracks made by the GRL to be used as input.",
63 string(
"TRGGRLUnpackerStore"));
65 "Name of the root file where the NeuroTrigger parameters will be saved.",
66 string(
"GRLNeuroTrigger.root"));
68 "Name of the root file where the generated training samples will be saved.",
69 string(
"GRLNeuroTrigger.root"));
71 "Name of the TObjArray to hold the NeuroTrigger parameters.",
74 "Name of the TObjArray to hold the training samples.",
77 "If true, save parameter distribution of training data "
78 "in train file and training curve in log file.",
true);
80 "Switch to load saved parameters if existing. "
81 "Take care not to duplicate training sets!",
false);
90 "#cdc track of expert MLPs.",
m_parameters.i_cdc_sector);
92 "#ecl cluster of expert MLPs.",
m_parameters.i_ecl_sector);
94 "Number of nodes in each hidden layer for all networks "
95 "or factor to multiply with number of inputs (1 list or nMLP lists). "
98 "If true, multiply nHidden with number of input nodes.",
101 "Output scale for all networks (1 value list or nMLP value lists). "
102 "Output[i] of the MLP is scaled from [-1, 1] "
103 "to [outputScale[2*i], outputScale[2*i+1]]. "
106 "Minimal number of training samples "
107 "or factor to multiply with number of weights. "
108 "If the minimal number of samples is not reached, "
109 "all samples are saved but no training is started.", 10.);
111 "Maximal number of training samples "
112 "or factor to multiply with number of weights. "
113 "When the maximal number of samples is reached, "
114 "no further samples are added.", 10.);
116 "If true, multiply nTrainMin and nTrainMax with number of weights.",
119 "Number of validation samples for training.", 1000);
121 "Number of test samples to get resolution after training.", 5000);
123 "Weights are limited to [-wMax, wMax] after each training epoch "
124 "(for convenience of the FPGA implementation).",
127 "Number of threads for parallel training.", 1);
129 "Training is stopped if validation error is higher than "
130 "checkInterval epochs ago, i.e. either the validation error is increasing "
131 "or the gain is less than the fluctuations.", 500);
133 "Maximum number of training epochs.", 10000);
135 "If >1, training is repeated several times with different start weights. "
136 "The weights which give the best resolution on the test samples are kept.", 1);
149 for (
unsigned iMLP = 0; iMLP < (unsigned)
n_sector; ++iMLP) {
155 B2WARNING(
"nTrainMin set to " <<
m_nTrainMin <<
" (was larger than nTrainMax)");
159 for (
int isector = 0; isector <
n_sector; isector++) {
160 h_cdc2d_phi_sig .push_back(
new TH1D((
"h_cdc2d_phi_sig_" + to_string(isector)).c_str(),
161 (
"h_cdc2d_phi_sig_" + to_string(isector)).c_str(), 64, -3.2, 3.2));
162 h_cdc2d_pt_sig .push_back(
new TH1D((
"h_cdc2d_pt_sig_" + to_string(isector)).c_str(),
163 (
"h_cdc2d_pt_sig_" + to_string(isector)).c_str(), 100, -5, 5));
164 h_selTheta_sig.push_back(
new TH1D((
"h_selTheta_sig_" + to_string(isector)).c_str(),
165 (
"h_selTheta_sig_" + to_string(isector)).c_str(), 64, -3.2, 3.2));
166 h_selPhi_sig .push_back(
new TH1D((
"h_selPhi_sig_" + to_string(isector)).c_str(), (
"h_selPhi_sig_" + to_string(isector)).c_str(),
168 h_selE_sig .push_back(
new TH1D((
"h_selE_sig_" + to_string(isector)).c_str(), (
"h_selE_sig_" + to_string(isector)).c_str(),
170 h_result_sig .push_back(
new TH1D((
"h_result_sig_" + to_string(isector)).c_str(), (
"h_result_sig_" + to_string(isector)).c_str(),
172 h_cdc2d_phi_bg .push_back(
new TH1D((
"h_cdc2d_phi_bg_" + to_string(isector)).c_str(),
173 (
"h_cdc2d_phi_bg_" + to_string(isector)).c_str(), 64, -3.2, 3.2));
174 h_cdc2d_pt_bg .push_back(
new TH1D((
"h_cdc2d_pt_bg_" + to_string(isector)).c_str(),
175 (
"h_cdc2d_pt_bg_" + to_string(isector)).c_str(), 100, -5, 5));
176 h_selTheta_bg .push_back(
new TH1D((
"h_selTheta_bg_" + to_string(isector)).c_str(), (
"h_selTheta_bg_" + to_string(isector)).c_str(),
178 h_selPhi_bg .push_back(
new TH1D((
"h_selPhi_bg_" + to_string(isector)).c_str(), (
"h_selPhi_bg_" + to_string(isector)).c_str(),
180 h_selE_bg .push_back(
new TH1D((
"h_selE_bg_" + to_string(isector)).c_str(), (
"h_selE_bg_" + to_string(isector)).c_str(),
182 h_result_bg .push_back(
new TH1D((
"h_result_bg_" + to_string(isector)).c_str(), (
"h_result_bg_" + to_string(isector)).c_str(),
185 h_ncdcf_sig.push_back(
new TH1D(
"h_ncdcf_sig",
"h_ncdcf_sig", 10, 0, 10));
186 h_ncdcs_sig.push_back(
new TH1D(
"h_ncdcs_sig",
"h_ncdcs_sig", 10, 0, 10));
187 h_ncdci_sig.push_back(
new TH1D(
"h_ncdci_sig",
"h_ncdci_sig", 10, 0, 10));
188 h_ncdc_sig.push_back(
new TH1D(
"h_ncdc_sig",
"h_ncdc_sig", 10, 0, 10));
189 h_necl_sig.push_back(
new TH1D(
"h_necl_sig",
"h_necl_sig", 10, 0, 10));
190 h_ncdcf_bg.push_back(
new TH1D(
"h_ncdcf_bg",
"h_ncdcf_bg", 10, 0, 10));
191 h_ncdcs_bg.push_back(
new TH1D(
"h_ncdcs_bg",
"h_ncdcs_bg", 10, 0, 10));
192 h_ncdci_bg.push_back(
new TH1D(
"h_ncdci_bg",
"h_ncdci_bg", 10, 0, 10));
193 h_ncdc_bg.push_back(
new TH1D(
"h_ncdc_bg",
"h_ncdc_bg", 10, 0, 10));
194 h_necl_bg.push_back(
new TH1D(
"h_necl_bg",
"h_necl_bg", 10, 0, 10));
198 for (
int tc = 1; tc <= 576; tc++) {
205 for (
int tc = 1; tc <= 576; tc++) {
208 ROOT::Math::XYZVector CellPosition = trgecl_obj->
getTCPosition(tc);
209 ROOT::Math::PxPyPzEVector CellLab;
210 CellLab.SetPx(CellPosition.Unit().X());
211 CellLab.SetPy(CellPosition.Unit().Y());
212 CellLab.SetPz(CellPosition.Unit().Z());
216 TCPhiLab.push_back(CellPosition.Phi());
217 double tantheta = tan(CellPosition.Theta());
218 TCcotThetaLab.push_back(1. / tantheta);
221 ROOT::Math::PxPyPzEVector CellCOM = boostrotate.
rotateLabToCms() * CellLab;
222 TCThetaCOM.push_back(CellCOM.Theta()*TMath::RadToDeg());
223 TCPhiCOM.push_back(CellCOM.Phi()*TMath::RadToDeg());
226 TC1GeV.push_back(1. / CellCOM.E());
236 std::vector<float> input;
237 std::vector<float> output;
240 std::vector<float> cdc2d_phi;
241 std::vector<float> cdc2d_pt;
252 for (
int i = 0; i < 36; i++) {
259 for (
int i = 0; i < 36; i++) {
260 if (GRLStore->get_phi_CDC(i)) {
266 for (
int i = 0; i < 64; i++) {
267 if (GRLStore->get_map_ST2(i)) {
268 int j = i * (36. / 64.);
274 for (
int i = 0; i < 64; i++) {
275 if (GRLStore->get_map_TSF0(i)) {
276 int j = i * (36. / 64.);
278 if (j1 < 0) j1 = j1 + 64;
280 if (j2 < 0) j2 = j2 + 64;
282 if (j3 < 0) j3 = j3 + 64;
284 if (j4 < 0) j4 = j4 + 64;
287 if (j6 > 63) j6 = j6 - 64;
289 if (j7 > 63) j7 = j7 - 64;
291 (GRLStore->get_map_TSF1(j1) || GRLStore->get_map_TSF1(j2) || GRLStore->get_map_TSF1(j3) || GRLStore->get_map_TSF1(j4)
292 || GRLStore->get_map_TSF1(j5))
294 (GRLStore->get_map_TSF2(j3) || GRLStore->get_map_TSF2(j4) || GRLStore->get_map_TSF2(j5) || GRLStore->get_map_TSF2(j6)
295 || GRLStore->get_map_TSF2(j7))
302 for (
int i = 0; i < 36; i++) {
303 if (map_cdcf[i] == 1) {
305 if (i1 < 0) i1 = i1 + 36;
307 if (i2 < 0) i2 = i2 + 36;
311 if (i4 > 36) i4 = i4 - 36;
313 if (i5 > 36) i5 = i5 - 36;
326 for (
int i = 0; i < 36; i++) {
327 if (map_cdcs[i] == 1) {
329 if (i1 < 0) i1 = i1 + 36;
331 if (i2 < 0) i2 = i2 + 36;
335 if (i4 > 36) i4 = i4 - 36;
337 if (i5 > 36) i5 = i5 - 36;
361 for (
int i = 0; i < 36; i++) {
362 if (map_cdcf[i] == 1) {n_cdcf++; n_cdc++;}
363 if (map_cdcs[i] == 1) {n_cdcs++; n_cdc++;}
364 if (map_cdci[i] == 1) {n_cdci++; n_cdc++;}
368 for (
int i = 0; i < 36; i++) {
369 input.push_back((map_cdcf[i] - 0.5) * 2);
371 for (
int i = 0; i < 36; i++) {
372 input.push_back((map_cdcs[i] - 0.5) * 2);
374 for (
int i = 0; i < 36; i++) {
375 input.push_back((map_cdci[i] - 0.5) * 2);
383 double EventTiming = -9999.;
384 if (ntrgArray > 0) {EventTiming = trgArray[0]->getEventTiming();}
385 std::vector<int> selTC;
386 std::vector<float> selTheta;
387 std::vector<float> selPhi;
388 std::vector<float> selE;
389 for (
int ic = 0; ic < eclTrgClusterArray.
getEntries(); ic++) {
390 double tcT = abs(eclTrgClusterArray[ic]->getTimeAve() - EventTiming);
392 int TC = eclTrgClusterArray[ic]->getMaxTCId();
394 selTheta.push_back(TCcotThetaLab[TC - 1]);
395 selPhi.push_back(TCPhiLab[TC - 1]);
396 selE.push_back(eclTrgClusterArray[ic]->getEnergyDep() * 0.001);
397 input.push_back(TCcotThetaLab[TC - 1] / TMath::Pi());
398 input.push_back(TCPhiLab[TC - 1] / TMath::Pi());
399 input.push_back((eclTrgClusterArray[ic]->getEnergyDep() * 0.001 - 3.5) / 3.5);
401 B2DEBUG(50,
"InputECL " << ic <<
" " << tcT <<
" " << TC <<
" " << TCcotThetaLab[TC - 1] <<
" " << TCPhiLab[TC - 1] <<
" " <<
402 eclTrgClusterArray[ic]->getEnergyDep() <<
" " << EventTiming);
406 bool accepted_signal =
false;
407 bool accepted_bg =
false;
408 bool accepted_hadron =
false;
409 bool accepted_filter =
false;
410 bool accepted_bhabha =
false;
413 const std::map<std::string, int>& skim_map = result_soft->getResults();
414 if (skim_map.find(
"software_trigger_cut&skim&accept_hadronb2") != skim_map.end()) {
417 if (skim_map.find(
"software_trigger_cut&filter&total_result") != skim_map.end()) {
420 if (skim_map.find(
"software_trigger_cut&skim&accept_bhabha") != skim_map.end()) {
425 accepted_signal = accepted_hadron && accepted_filter;
426 accepted_bg = !accepted_filter;
429 int cdc_sector = cdc2d_phi.size();
430 int ecl_sector = selTC.size();
432 B2DEBUG(50,
"Input " << cdc_sector <<
" " << ecl_sector <<
" " << accepted_signal <<
" " << accepted_bg);
434 && !accepted_filter)B2DEBUG(50,
"Input " << cdc_sector <<
" " << ecl_sector <<
" " << accepted_signal <<
" " << accepted_filter <<
435 " " << accepted_bhabha);
437 if (accepted_signal) {
439 }
else if (accepted_bg) {
443 output.push_back(-1);
449 output.push_back(-1);
453 output.push_back(-1);
464 if (accepted_signal) {
465 for (
int i = 0; i < cdc_sector; i++)
h_cdc2d_phi_sig[isector]->Fill(cdc2d_phi[i]);
466 for (
int i = 0; i < cdc_sector; i++) h_cdc2d_pt_sig[isector]->Fill(cdc2d_pt[i]);
467 for (
int i = 0; i < ecl_sector; i++) h_selTheta_sig[isector]->Fill(selTheta[i]);
468 for (
int i = 0; i < ecl_sector; i++) h_selPhi_sig[isector]->Fill(selPhi[i]);
469 for (
int i = 0; i < ecl_sector; i++) h_selE_sig[isector]->Fill(selE[i]);
470 h_ncdcf_sig[0]->Fill(n_cdcf);
471 h_ncdcs_sig[0]->Fill(n_cdcs);
472 h_ncdci_sig[0]->Fill(n_cdci);
473 h_ncdc_sig[0]->Fill(n_cdc);
474 h_necl_sig[0]->Fill(ecl_sector);
475 }
else if (accepted_bg) {
476 for (
int i = 0; i < cdc_sector; i++) h_cdc2d_phi_bg[isector]->Fill(cdc2d_phi[i]);
477 for (
int i = 0; i < cdc_sector; i++) h_cdc2d_pt_bg[isector]->Fill(cdc2d_pt[i]);
478 for (
int i = 0; i < ecl_sector; i++) h_selTheta_bg[isector]->Fill(selTheta[i]);
479 for (
int i = 0; i < ecl_sector; i++) h_selPhi_bg[isector]->Fill(selPhi[i]);
480 for (
int i = 0; i < ecl_sector; i++) h_selE_bg[isector]->Fill(selE[i]);
481 h_ncdcf_bg[0]->Fill(n_cdcf);
482 h_ncdcs_bg[0]->Fill(n_cdcs);
483 h_ncdci_bg[0]->Fill(n_cdci);
484 h_ncdc_bg[0]->Fill(n_cdc);
485 h_necl_bg[0]->Fill(ecl_sector);
502 B2WARNING(
"Not enough training samples for sector " << isector <<
" (" << (nTrainMin +
m_nValid +
m_nTest)
503 <<
" requested, " <<
m_trainSets[isector].getNumberOfSamples() <<
" found)");
522 B2INFO(
"Training network for sector " << isector <<
" with OpenMP");
524 B2INFO(
"Training network for sector " << isector <<
" without OpenMP");
527 unsigned nLayers =
m_GRLNeuro[isector].getNumberOfLayers();
528 unsigned* nNodes =
new unsigned[nLayers];
529 for (
unsigned il = 0; il < nLayers; ++il) {
530 nNodes[il] =
m_GRLNeuro[isector].getNumberOfNodesLayer(il);
532 struct fann* ann = fann_create_standard_array(nLayers, nNodes);
537 struct fann_train_data* train_data =
538 fann_create_train(nTrain, nNodes[0], nNodes[nLayers - 1]);
539 for (
unsigned i = 0; i < nTrain; ++i) {
540 vector<float> input = currentData.
getInput(i);
541 for (
unsigned j = 0; j < input.size(); ++j) {
542 train_data->input[i][j] = input[j];
544 vector<float> target = currentData.
getTarget(i);
545 for (
unsigned j = 0; j < target.size(); ++j) {
546 train_data->output[i][j] = target[j];
550 struct fann_train_data* valid_data =
551 fann_create_train(
m_nValid, nNodes[0], nNodes[nLayers - 1]);
552 for (
unsigned i = nTrain; i < nTrain +
m_nValid; ++i) {
553 vector<float> input = currentData.
getInput(i);
554 for (
unsigned j = 0; j < input.size(); ++j) {
555 valid_data->input[i - nTrain][j] = input[j];
557 vector<float> target = currentData.
getTarget(i);
558 for (
unsigned j = 0; j < target.size(); ++j) {
559 valid_data->output[i - nTrain][j] = target[j];
563 fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
564 fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC);
565 fann_set_training_algorithm(ann, FANN_TRAIN_RPROP);
567 vector<double> trainOptLog = {};
568 vector<double> validOptLog = {};
571 double bestValid = 999.;
572 vector<double> trainLog = {};
573 vector<double> validLog = {};
578 vector<fann_type> bestWeights = {};
579 bestWeights.assign(
m_GRLNeuro[isector].getNumberOfWeights(), 0.);
580 fann_randomize_weights(ann, -0.1, 0.1);
582 for (
int epoch = 1; epoch <=
m_maxEpochs; ++epoch) {
584 double mse = parallel_fann::train_epoch_irpropm_parallel(ann, train_data,
m_nThreads);
586 double mse = fann_train_epoch(ann, train_data);
588 trainLog[epoch - 1] = mse;
590 for (
unsigned iw = 0; iw < ann->total_connections; ++iw) {
591 if (ann->weights[iw] >
m_wMax)
592 ann->weights[iw] =
m_wMax;
593 else if (ann->weights[iw] < -
m_wMax)
594 ann->weights[iw] = -
m_wMax;
599 double valid_mse = parallel_fann::test_data_parallel(ann, valid_data,
m_nThreads);
601 double valid_mse = fann_test_data(ann, valid_data);
603 validLog[epoch - 1] = valid_mse;
605 if (valid_mse < bestValid) {
606 bestValid = valid_mse;
607 for (
unsigned iw = 0; iw < ann->total_connections; ++iw) {
608 bestWeights[iw] = ann->weights[iw];
614 B2INFO(
"Training run " << irun <<
" stopped in epoch " << epoch);
615 B2INFO(
"Train error: " << mse <<
", valid error: " << valid_mse <<
616 ", best valid: " << bestValid);
621 if (epoch == 1 || (epoch < 100 && epoch % 10 == 0) || epoch % 100 == 0) {
622 B2INFO(
"Epoch " << epoch <<
": Train error = " << mse <<
623 ", valid error = " << valid_mse <<
", best valid = " << bestValid);
626 if (breakEpoch == 0) {
627 B2INFO(
"Training run " << irun <<
" finished in epoch " <<
m_maxEpochs);
630 trainOptLog.push_back(trainLog[bestEpoch - 1]);
631 validOptLog.push_back(validLog[bestEpoch - 1]);
632 vector<float> oldWeights =
m_GRLNeuro[isector].getWeights();
638 vector<float> target =
m_trainSets[isector].getTarget(i);
640 if (((
int)target[0]) == 1)h_result_sig[isector]->Fill(output);
641 else h_result_bg[isector]->Fill(output);
646 fann_destroy_train(train_data);
647 fann_destroy_train(valid_data);
655 B2INFO(
"Saving traindata to file " << filename <<
", array " << arrayname);
656 TFile datafile(filename.c_str(),
"RECREATE");
658 for (
int isector = 0; isector <
n_sector; ++isector) {
662 h_cdc2d_pt_sig[isector]->Write();
663 h_selTheta_sig[isector]->Write();
664 h_selPhi_sig[isector]->Write();
665 h_selE_sig[isector]->Write();
666 h_result_sig[isector]->Write();
667 h_cdc2d_phi_bg[isector]->Write();
668 h_cdc2d_pt_bg[isector]->Write();
669 h_selTheta_bg[isector]->Write();
670 h_selPhi_bg[isector]->Write();
671 h_selE_bg[isector]->Write();
672 h_result_bg[isector]->Write();
674 h_ncdcf_sig[0]->Write();
675 h_ncdcs_sig[0]->Write();
676 h_ncdci_sig[0]->Write();
677 h_ncdc_sig[0]->Write();
678 h_necl_sig[0]->Write();
679 h_ncdcf_bg[0]->Write();
680 h_ncdcs_bg[0]->Write();
681 h_ncdci_bg[0]->Write();
682 h_ncdc_bg[0]->Write();
683 h_necl_bg[0]->Write();
689 for (
int isector = 0; isector <
n_sector; ++ isector) {
691 delete h_cdc2d_pt_sig[isector];
692 delete h_selTheta_sig[isector];
693 delete h_selPhi_sig[isector];
694 delete h_selE_sig[isector];
695 delete h_result_sig[isector];
696 delete h_cdc2d_phi_bg[isector];
697 delete h_cdc2d_pt_bg[isector];
698 delete h_selTheta_bg[isector];
699 delete h_selPhi_bg[isector];
700 delete h_selE_bg[isector];
701 delete h_result_bg[isector];
703 delete h_ncdcf_sig[0];
704 delete h_ncdcs_sig[0];
705 delete h_ncdci_sig[0];
706 delete h_ncdc_sig[0];
707 delete h_necl_sig[0];
708 delete h_ncdcf_bg[0];
709 delete h_ncdcs_bg[0];
710 delete h_ncdci_bg[0];
714 h_cdc2d_pt_sig.clear();
715 h_selTheta_sig.clear();
716 h_selPhi_sig.clear();
718 h_result_sig.clear();
719 h_cdc2d_phi_bg.clear();
720 h_cdc2d_pt_bg.clear();
721 h_selTheta_bg.clear();
Struct for training data of a single MLP for the neuro trigger.
const std::vector< float > & getInput(unsigned i) const
get input vector of sample i
const std::vector< float > & getTarget(unsigned i) const
get target value of sample i
int m_maxEpochs
Maximal number of training epochs.
GRLNeuroTrainerModule()
Constructor, for setting module description and parameters.
int m_nValid
Number of validation samples.
bool m_load
Switch to load saved parameters from a previous run.
virtual void initialize() override
Initialize the module.
std::string m_TrgECLClusterName
Name of the StoreArray containing the ECL clusters.
int m_checkInterval
Training is stopped if validation error is higher than checkInterval epochs ago, i.e. either the validation error is increasing or the gain is less than the fluctuations.
GRLNeuro m_GRLNeuro
Instance of the NeuroTrigger.
virtual void event() override
Called once for each event.
int n_cdc_sector
Number of CDC sectors.
GRLNeuro::Parameters m_parameters
Parameters for the NeuroTrigger.
double m_wMax
Limit for weights.
std::string m_arrayname
Name of the TObjArray holding the networks.
virtual void terminate() override
Do the training for all sectors.
bool m_multiplyNTrain
Switch to multiply number of samples with number of weights.
int m_nThreads
Number of threads for training.
int m_nTest
Number of test samples.
std::string m_GRLCollectionName
Name of the StoreObj containing the input GRL.
std::string m_trainFilename
Name of file where training samples are stored.
void train(unsigned isector)
Train a single MLP.
std::string m_2DfinderCollectionName
Name of the StoreArray containing the input 2D tracks.
int n_ecl_sector
Number of ECL sectors.
std::vector< TH1D * > h_cdc2d_phi_sig
Histograms for monitoring.
double m_nTrainMin
Minimal number of training samples.
std::string m_filename
Name of the root file where network weights and other NeuroTrigger parameters are saved.
std::vector< GRLMLPData > m_trainSets
Sets of training data for all sectors.
void saveTraindata(const std::string &filename, const std::string &arrayname="trainSets")
Save all training samples.
std::vector< int > scale_bg
BG scale factor for training.
double m_nTrainMax
Maximal number of training samples.
bool m_saveDebug
If true, save training curve and parameter distribution of training data.
std::string m_trainArrayname
Name of the TObjArray holding the training samples.
int n_sector
Number of Total sectors.
int m_repeatTrain
Number of training runs with different random start weights.
void initialize(const Parameters &p)
Set parameters and get some network independent parameters.
float runMLP(unsigned isector, const std::vector< float > &input)
Run an expert MLP.
unsigned nSectors() const
return number of neural networks
void setDescription(const std::string &description)
Sets the description of the module.
Accessor to arrays stored in the data store.
int getEntries() const
Get the number of objects in the array.
Type-safe access to single objects in the data store.
bool isValid() const
Check whether the object was created.
int getTCThetaIdFromTCId(int)
get [TC Theta ID] from [TC ID]
ROOT::Math::XYZVector getTCPosition(int)
TC position (cm)
void addParam(const std::string &name, T &paramVariable, const std::string &description, const T &defaultValue)
Adds a new parameter to the module.
#define REG_MODULE(moduleName)
Register the given module (without 'Module' suffix) with the framework.
c_accept
Accept this event.
Abstract base class for different kinds of events.
std::vector< std::vector< float > > outputScale
Output scale for all networks.
bool multiplyHidden
If true, multiply nHidden with number of input nodes.
unsigned nMLP
Number of networks.
unsigned n_ecl_sector
Number of ECL sectors.
std::vector< std::vector< float > > nHidden
Number of nodes in each hidden layer for all networks or factor to multiply with number of inputs.
unsigned n_cdc_sector
Number of CDC sectors.