Belle II Software development
simple.py
#!/usr/bin/env python3

import time

import torch
class myModel(torch.nn.Module):
    """
    A small dense neural network for binary classification.
    """
    def __init__(self, number_of_features):
        """
        Initialise the network.

        :param number_of_features: number of input variables
        """
        super().__init__()

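        #: a dense model with two hidden layers of 128 units each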
        self.network = torch.nn.Sequential(
            torch.nn.Linear(number_of_features, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 1),
            torch.nn.Sigmoid(),
        )

    def forward(self, x):
        """
        Run the network and return the signal probability.
        """
        prob = self.network(x)
        return prob

def get_model(number_of_features, number_of_spectators, number_of_events, training_fraction, parameters):
    """
    Return the default torch model, wrapped in a State object.
    """

    # Since the move to storing only weights for torch, all arguments of get_model
    # that are needed to reconstruct our state must be passed explicitly as kwargs to State!
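    # NOTE: `State` is not imported here; it is provided by basf2's MVA torch
    # interface (basf2_mva_python_interface.torch) when this steering file is
    # executed by basf2_mva.teacher.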
    state = State(
        myModel(number_of_features).to("cpu"),
        # in this case we need `number_of_features` and `parameters`
        number_of_features=number_of_features,
        parameters=parameters,
    )
    print(state.model)

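    # plain SGD; the learning rate is read from the `parameters` dict, which is
    # filled from the JSON string passed as m_config in the __main__ block below
    # (default 1e-2 if absent)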
    state.optimizer = torch.optim.SGD(state.model.parameters(), parameters.get('learning_rate', 1e-2))

    # the loss function is re-created on each batch so that per-event weights can
    # be passed in; therefore store the class itself, not an instance
    state.loss_fn = torch.nn.BCELoss

    # for bookkeeping
    state.epoch = 0
    state.avg_costs = []
    return state

if __name__ == "__main__":
    from basf2 import conditions
    import basf2_mva
    import basf2_mva_util
    import json

    # NOTE: do not use testing payloads in production! Any results obtained like this WILL NOT BE PUBLISHED
    conditions.testing_payloads = [
        'localdb/database.txt'
    ]

    general_options = basf2_mva.GeneralOptions()
    general_options.m_datafiles = basf2_mva.vector("train.root")
    general_options.m_identifier = "Simple"
    general_options.m_treename = "tree"
    variables = ['M', 'p', 'pt', 'pz',
                 'daughter(0, p)', 'daughter(0, pz)', 'daughter(0, pt)',
                 'daughter(1, p)', 'daughter(1, pz)', 'daughter(1, pt)',
                 'daughter(2, p)', 'daughter(2, pz)', 'daughter(2, pt)',
                 'chiProb', 'dr', 'dz',
                 'daughter(0, dr)', 'daughter(1, dr)',
                 'daughter(0, dz)', 'daughter(1, dz)',
                 'daughter(0, chiProb)', 'daughter(1, chiProb)', 'daughter(2, chiProb)',
                 'daughter(0, kaonID)', 'daughter(0, pionID)',
                 'daughterInvM(0, 1)', 'daughterInvM(0, 2)', 'daughterInvM(1, 2)']
    general_options.m_variables = basf2_mva.vector(*variables)
    general_options.m_target_variable = "isSignal"

    specific_options = basf2_mva.PythonOptions()
    specific_options.m_framework = "torch"
    specific_options.m_steering_file = 'mva/examples/torch/simple.py'
    # the number of training epochs
    specific_options.m_nIterations = 64
    specific_options.m_mini_batch_size = 256
    specific_options.m_config = json.dumps({'learning_rate': 1e-2})
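    # this JSON string is decoded by the MVA framework and handed to get_model()
    # above as the `parameters` dict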
    specific_options.m_training_fraction = 0.8
    specific_options.m_normalise = False

    training_start = time.time()
    basf2_mva.teacher(general_options, specific_options)
    training_stop = time.time()
    training_time = training_stop - training_start
    method = basf2_mva_util.Method(general_options.m_identifier)

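    # apply the trained method to independent test data; apply_expert returns the
    # network output p and the true target t for every event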
    inference_start = time.time()
    test_data = ["test.root"]
    p, t = method.apply_expert(basf2_mva.vector(*test_data), general_options.m_treename)
    inference_stop = time.time()
    inference_time = inference_stop - inference_start
    auc = basf2_mva_util.calculate_auc_efficiency_vs_background_retention(p, t)
    print("Torch", training_time, inference_time, auc)
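A minimal standalone sketch (plain PyTorch, no basf2) to check the model's input
and output shapes and run one BCE training step. The feature count of 28 matches
the length of the `variables` list above; the random tensors and the import of
this file as a module named `simple` are assumptions for illustration only:

import torch
from simple import myModel  # hypothetical: assumes this file is importable as `simple`

model = myModel(number_of_features=28)
features = torch.rand(256, 28)                   # one dummy mini-batch of 256 events
labels = torch.randint(0, 2, (256, 1)).float()   # dummy signal/background labels

prob = model(features)                           # sigmoid output in [0, 1], shape (256, 1)
loss = torch.nn.BCELoss()(prob, labels)

optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(f"batch loss: {loss.item():.4f}")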