Belle II Software development
simple.py
#!/usr/bin/env python3


"""
Minimal example for:
- creating and fitting a model in pytorch
- converting the model to ONNX
- creating an MVA weightfile with the ONNX model
- running inference in basf2 directly via the MVA expert interface
"""
18
19import basf2 # noqa
20import torch
21from torch import nn
22import numpy as np
23import uproot
24
25import ROOT
26
27
class Model(nn.Module):
    """
    A small fully connected binary classifier.

    Three linear layers with ReLU activations in between and a final
    sigmoid that squashes the single output into a probability.
    """

    def __init__(self, number_of_features):
        """
        Parameters:
            number_of_features: number of input features
        """
        super().__init__()

        hidden = 128
        layers = [
            nn.Linear(number_of_features, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 1),
            nn.Sigmoid(),
        ]
        # the full stack, applied in order by forward()
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        """
        Apply the network to a batch of inputs and return probabilities.
        """
        return self.network(x)
56
57
def fit(model, filename, treename, variables, target_variable, *, epochs=50, batch_size=256):
    """
    Train ``model`` on data read from a ROOT file.

    Parameters:
        model: a torch ``nn.Module`` mapping a float32 feature tensor of
            shape (batch, n_features) to a probability in [0, 1]
        filename: path of the ROOT file containing the training data
        treename: name of the TTree inside the ROOT file
        variables: list of basf2 variable names used as input features
        target_variable: branch name of the (0/1) training target
        epochs: number of passes over the full dataset
        batch_size: number of samples per optimizer step
    """
    with uproot.open({filename: treename}) as tree:
        # the branches in the file carry the ROOT-compatible versions of the
        # basf2 variable names, so translate them before reading; pass a
        # concrete list (not a lazy map object) to uproot
        branches = [
            ROOT.Belle2.MakeROOTCompatible.makeROOTCompatible(v) for v in variables
        ]
        X = tree.arrays(branches, library="pd").to_numpy()
        y = tree[target_variable].array(library="np")
    # X and y are fully materialized, so the file can be closed before training
    ds = torch.utils.data.TensorDataset(
        torch.tensor(X, dtype=torch.float32),
        # add a trailing axis so the targets match the (batch, 1) model output
        torch.tensor(y, dtype=torch.float32)[:, np.newaxis],
    )
    dl = torch.utils.data.DataLoader(ds, batch_size=batch_size, shuffle=True)
    opt = torch.optim.Adam(model.parameters())
    for epoch in range(epochs):
        print(f"Epoch {epoch}", end=", ")
        losses = []
        for bx, by in dl:
            opt.zero_grad()
            p = model(bx)
            loss = torch.nn.functional.binary_cross_entropy(p, by)
            loss.backward()
            opt.step()
            losses.append(loss.detach().item())
        print(f"Loss = {np.mean(losses)}", end="\r")
    print()
83
84
if __name__ == "__main__":
    import time

    from basf2_mva_util import (
        create_onnx_mva_weightfile,
        Method,
        calculate_auc_efficiency_vs_background_retention,
    )
    from basf2 import find_file

    # D0 -> K pi pi example samples shipped with basf2
    train_file = find_file("mva/train_D0toKpipi.root", "examples")
    test_file = find_file("mva/test_D0toKpipi.root", "examples")

    # input features: candidate kinematics, daughter kinematics,
    # vertex/track quality, PID and pairwise invariant masses
    variables = [
        'M', 'p', 'pt', 'pz',
        'daughter(0, p)', 'daughter(0, pz)', 'daughter(0, pt)',
        'daughter(1, p)', 'daughter(1, pz)', 'daughter(1, pt)',
        'daughter(2, p)', 'daughter(2, pz)', 'daughter(2, pt)',
        'chiProb', 'dr', 'dz',
        'daughter(0, dr)', 'daughter(1, dr)',
        'daughter(0, dz)', 'daughter(1, dz)',
        'daughter(0, chiProb)', 'daughter(1, chiProb)', 'daughter(2, chiProb)',
        'daughter(0, kaonID)', 'daughter(0, pionID)',
        'daughterInvM(0, 1)', 'daughterInvM(0, 2)', 'daughterInvM(1, 2)',
    ]

    # build and train the classifier
    net = Model(len(variables))
    fit(net, train_file, "tree", variables, "isSignal")

    # serialize the trained network to ONNX with named input/output tensors
    dummy_input = torch.randn(1, len(variables))
    torch.onnx.export(
        net,
        (dummy_input,),
        "model.onnx",
        input_names=["input"],
        output_names=["output"],
    )

    # wrap the ONNX file into an MVA weightfile usable by basf2
    weightfile = create_onnx_mva_weightfile(
        "model.onnx",
        variables=variables,
        target_variable="isSignal",
    )
    weightfile.save("model_mva.xml")

    # run the basf2 MVA expert on the test sample and report timing and AUC
    method = Method("model_mva.xml")
    start = time.time()
    p, t = method.apply_expert([test_file], "tree")
    stop = time.time()
    auc = calculate_auc_efficiency_vs_background_retention(p, t)
    print("ONNX", stop - start, auc)
STL class.
STL class.
network
a dense model with one hidden layer
Definition simple.py:41
__init__(self, number_of_features)
Definition simple.py:33
forward(self, x)
Definition simple.py:50