// Run an expert MLP.
390{
391
392
393 const GRLMLP& expert =
m_MLPs[isector];
394 vector<float> weights = expert.getWeights();
395 vector<float> bias = expert.getBias();
396 vector<float> layerinput = input;
397
398 vector<float> layeroutput2 = {};
399 vector<float> layeroutput3 = {};
400 vector<float> layeroutput4 = {};
401
403 for (size_t i = 0; i < layerinput.size(); ++i) {
404 layerinput[i] = sim_fix_input_layer_t(layerinput[i]);
405 }
406 layeroutput2.clear();
407 layeroutput2.assign(expert.getNumberOfNodesLayer(2), 0.);
408
409 unsigned num_inputs = layerinput.size();
410 unsigned num_neurons = expert.getNumberOfNodesLayer(2);
411 for (unsigned io = 0; io < num_neurons; ++io) {
412 float bias_raw = bias[io];
413 float bias_fixed = sim_fix_dense_0_bias_t(bias_raw);
414 float bias_contrib = sim_fix_dense_0_accum_t(bias_fixed);
415 layeroutput2[io] = bias_contrib;
416 }
417
418 unsigned iw = 0;
419
420 for (unsigned ii = 0; ii < num_inputs; ++ii) {
421 float input_val = layerinput[ii];
422 for (unsigned io = 0; io < num_neurons; ++io) {
423 float weight_raw = weights[iw];
424 float weight_fixed = sim_fix_dense_0_weight_t(weight_raw);
425 float product = input_val * weight_fixed;
426 float contrib = sim_fix_dense_0_accum_t(product);
427
428 layeroutput2[io] += contrib;
429
430 ++iw;
431 }
432 }
433
434
435
436
437 std::vector<float> layeroutput2_fixed_relu(num_neurons);
438
439 for (unsigned io = 0; io < num_neurons; ++io) {
440
441 float fixed_val = sim_fix_dense_0_t(layeroutput2[io]);
442
443
444 float relu_val = (fixed_val > 0) ? fixed_val : 0;
445
446 layeroutput2_fixed_relu[io] = relu_val;
447
448 }
449
450 std::vector<float> dense1_input(64);
451 for (unsigned i = 0; i < 64; ++i) {
452 dense1_input[i] = sim_dense_1_input_quant(i, layeroutput2_fixed_relu[i]);
453 }
454
455 layeroutput3.clear();
456 layeroutput3.assign(expert.getNumberOfNodesLayer(1), 0.);
457 unsigned num_inputs_1 = layeroutput2_fixed_relu.size();
458 unsigned num_neurons_1 = expert.getNumberOfNodesLayer(2);
459 for (unsigned io = 64; io < num_neurons_1 + 64; ++io) {
460 float bias_raw = bias[io];
461 float bias_fixed = sim_fix_dense_1_bias_t(bias_raw);
462 float bias_contrib = sim_fix_dense_1_accum_t(bias_fixed);
463 layeroutput3[io - 64] = bias_contrib;
464
465 }
466
467
468 for (unsigned ii = 0; ii < num_inputs_1; ++ii) {
469 float input_val = dense1_input[ii];
470 for (unsigned io = 0; io < num_neurons_1; ++io) {
471
472 float weight_raw = weights[iw];
473
474 float weight_fixed = sim_fix_dense_1_weight_t(weight_raw);
475 float product = input_val * weight_fixed;
476 float contrib = sim_fix_dense_1_accum_t(product);
477
478 layeroutput3[io] += contrib;
479 ++iw;
480 }
481 }
482
483
484 std::vector<float> layeroutput3_fixed_relu(num_neurons);
485
486
487 for (unsigned io = 0; io < num_neurons_1; ++io) {
488 float fixed_val = sim_fix_dense_1_t(layeroutput3[io]);
489
490 float relu_val = (fixed_val > 0) ? fixed_val : 0;
491
492 layeroutput3_fixed_relu[io] = relu_val;
493
494 }
495 std::vector<float> dense2_input(64);
496 for (unsigned i = 0; i < 64; ++i) {
497 dense2_input[i] = sim_dense_2_input_quant(i, layeroutput3_fixed_relu[i]);
498 }
499 layeroutput4.clear();
500 layeroutput4.assign(expert.getNumberOfNodesLayer(3), 0.);
501
502 unsigned num_inputs_2 = layeroutput2_fixed_relu.size();
503 unsigned num_neurons_2 = expert.getNumberOfNodesLayer(3);
504 for (unsigned io = 128; io < num_neurons_2 + 128; ++io) {
505 float bias_raw = bias[io];
506 float bias_fixed = sim_fix_dense_2_bias_t(bias_raw);
507 float bias_contrib = sim_fix_dense_2_accum_t(bias_fixed);
508 layeroutput4[io - 128] = bias_contrib;
509
510 }
511
512 for (unsigned ii = 0; ii < num_inputs_2; ++ii) {
513 float input_val = dense2_input[ii];
514 for (unsigned io = 0; io < num_neurons_2; ++io) {
515 float weight_raw = weights[iw];
516 float weight_fixed = sim_fix_dense_2_weight_t(weight_raw);
517 float product = input_val * weight_fixed;
518 float contrib = sim_fix_dense_2_accum_t(product);
519
520 layeroutput4[io] += contrib;
521
522 ++iw;
523 }
524 }
525 return layeroutput4[0];
526
527}