- creating and fitting a model in PyTorch (the training input can be inspected with uproot, as sketched just below)
- converting the model to ONNX
- creating an MVA weightfile with the ONNX model
- running inference in basf2 directly via the MVA expert interface
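As an optional first check, the example training file can be opened interactively with uproot to see what the script below will read. This is a minimal sketch, not part of the example itself; it only reuses the file and tree name that appear further down.

import uproot
from basf2 import find_file

# locate the example training file shipped with basf2 and inspect the tree
train_file = find_file("mva/train_D0toKpipi.root", "examples")
with uproot.open({train_file: "tree"}) as tree:
    print(tree.num_entries)   # number of candidates available for training
    print(tree.keys())        # branch names (ROOT-compatible variable names)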
import time

import numpy as np
import torch
import torch.nn as nn
import uproot
import ROOT


class Model(nn.Module):
    """
    My dense neural network: a dense model with one hidden layer
    """

    def __init__(self, number_of_features):
        """
        number_of_features: number of input features
        """
        super().__init__()
        # one hidden layer of width 128; the output ends in a sigmoid so that
        # binary_cross_entropy can be used in fit(). Only the first Linear layer
        # is taken verbatim from the original, the activation and output layer
        # are a reconstruction.
        self.network = nn.Sequential(
            nn.Linear(number_of_features, 128),
            nn.ReLU(),
            nn.Linear(128, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return self.network(x)
def fit(model, filename, treename, variables, target_variable):
    """Train the model with binary cross entropy on the given file and tree."""
    with uproot.open({filename: treename}) as tree:
        # read the input branches into a feature matrix; the exact construction
        # of X was lost in the original, this per-branch version is a reconstruction
        X = np.stack(
            [
                tree[name].array(library="np")
                for name in map(ROOT.Belle2.MakeROOTCompatible.makeROOTCompatible, variables)
            ],
            axis=1,
        )
        y = tree[target_variable].array(library="np")
    ds = torch.utils.data.TensorDataset(
        torch.tensor(X, dtype=torch.float32),
        torch.tensor(y, dtype=torch.float32)[:, np.newaxis],
    )
    dl = torch.utils.data.DataLoader(ds, batch_size=256, shuffle=True)
    opt = torch.optim.Adam(model.parameters())
    for epoch in range(50):
        print(f"Epoch {epoch}", end=", ")
        losses = []
        for bx, by in dl:
            opt.zero_grad()
            p = model(bx)
            loss = torch.nn.functional.binary_cross_entropy(p, by)
            loss.backward()
            opt.step()
            losses.append(loss.detach().item())
        print(f"Loss = {np.mean(losses)}", end="\r")
    print()
if __name__ == "__main__":
    from basf2_mva_util import (
        create_onnx_mva_weightfile,
        Method,
        calculate_auc_efficiency_vs_background_retention,
    )
    from basf2 import find_file

    train_file = find_file("mva/train_D0toKpipi.root", "examples")
    test_file = find_file("mva/test_D0toKpipi.root", "examples")

    variables = ['M', 'p', 'pt', 'pz',
                 'daughter(0, p)', 'daughter(0, pz)', 'daughter(0, pt)',
                 'daughter(1, p)', 'daughter(1, pz)', 'daughter(1, pt)',
                 'daughter(2, p)', 'daughter(2, pz)', 'daughter(2, pt)',
                 'chiProb', 'dr', 'dz',
                 'daughter(0, dr)', 'daughter(1, dr)',
                 'daughter(0, dz)', 'daughter(1, dz)',
                 'daughter(0, chiProb)', 'daughter(1, chiProb)', 'daughter(2, chiProb)',
                 'daughter(0, kaonID)', 'daughter(0, pionID)',
                 'daughterInvM(0, 1)', 'daughterInvM(0, 2)', 'daughterInvM(1, 2)']

    # create and fit the model; the explicit fit call was lost in the original,
    # tree name and target variable follow the rest of the example
    model = Model(len(variables))
    fit(model, train_file, "tree", variables, "isSignal")

    # export the fitted model to ONNX (the output file name "model.onnx" is assumed)
    torch.onnx.export(
        model,
        (torch.randn(1, len(variables)),),
        "model.onnx",
        input_names=["input"],
        output_names=["output"],
    )

    # wrap the exported ONNX model in an MVA weightfile; apart from
    # target_variable the arguments of this call were lost in the original,
    # the ONNX file and the variable list shown here are assumptions
    weightfile = create_onnx_mva_weightfile(
        "model.onnx",
        variables=variables,
        target_variable="isSignal",
    )
    weightfile.save("model_mva.xml")

    # run inference on the test file via the MVA expert interface and time it
    method = Method("model_mva.xml")
    inference_start = time.time()
    p, t = method.apply_expert([test_file], "tree")
    inference_stop = time.time()
    inference_time = inference_stop - inference_start
    auc = calculate_auc_efficiency_vs_background_retention(p, t)
    print("ONNX", inference_time, auc)
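Independently of basf2, the exported network can be sanity-checked with ONNX Runtime. The following is a minimal sketch, not part of the example above; it assumes the file name model.onnx and the tensor names "input"/"output" chosen in the export step, and 28 is simply the length of the variable list.

import numpy as np
import onnxruntime as ort

# load the exported network and evaluate it on a few random dummy candidates
session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
x = np.random.randn(10, 28).astype(np.float32)   # 28 input features, as in the variable list
(probabilities,) = session.run(["output"], {"input": x})
print(probabilities.shape)   # (10, 1); sigmoid outputs between 0 and 1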