#!/usr/bin/env python3

# Belle II Software light-2406-ragdoll: mva/examples/tensorflow/simple_deep.py

import numpy as np
import tensorflow as tf
import basf2_mva
import basf2_mva_util
import time

# State carries the model between the hooks of the basf2 MVA python interface
from basf2_mva_python_interface.tensorflow import State


def get_model(number_of_features, number_of_spectators, number_of_events, training_fraction, parameters):

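    # The network is built five times wider than the raw feature count;
    # partial_fit() and apply() repeat each input column five times with
    # np.repeat so the data matches the widened first layer.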
    number_of_features *= 5

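    # Let TensorFlow grow GPU memory on demand instead of reserving it all up front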
    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)

    class my_model(tf.Module):

        def __init__(self, **kwargs):
            super().__init__(**kwargs)

            self.optimizer = tf.optimizers.Adam(0.01)

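            # Weights start from a truncated normal scaled by 1/sqrt(fan-in),
            # biases from zero; the activation function travels with them.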
            def create_layer_variables(shape, name, activation_function):
                weights = tf.Variable(
                    tf.random.truncated_normal(shape, stddev=1.0 / np.sqrt(float(shape[0]))),
                    name=f'{name}_weights')
                biases = tf.Variable(tf.zeros(shape=[shape[1]]), name=f'{name}_biases')
                return weights, biases, activation_function

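            # Nine square ReLU hidden layers followed by a single sigmoid output unit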
            self.n_layers = 10
            self.layer_variables = []

            shape = [number_of_features, number_of_features]
            for i in range(self.n_layers - 1):
                self.layer_variables.append(create_layer_variables(shape, f'inference_hidden{i}', tf.nn.relu))
            self.layer_variables.append(create_layer_variables([number_of_features, 1], 'inference_sigmoid', tf.nn.sigmoid))

        @tf.function(input_signature=[tf.TensorSpec(shape=[None, number_of_features], dtype=tf.float32)])
        def __call__(self, x):

            def dense(x, W, b, activation_function):
                return activation_function(tf.matmul(x, W) + b)

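            # forward pass: feed the batch through all layers in order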
            for i in range(self.n_layers):
                x = dense(x, *self.layer_variables[i])
            return x

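        # Weighted binary cross-entropy plus a tiny L2 penalty on the hidden
        # weights; epsilon keeps the logarithm finite for saturated outputs.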
        @tf.function
        def loss(self, predicted_y, target_y, w):
            lam = 1e-15
            epsilon = 1e-5
            l2_loss = lam * tf.math.add_n([tf.nn.l2_loss(n) for n in self.trainable_variables
                                           if '_weights' in n.name and 'sigmoid' not in n.name])

            diff_from_truth = tf.where(target_y == 1., predicted_y, 1. - predicted_y)
            cross_entropy = - tf.reduce_sum(w * tf.math.log(diff_from_truth + epsilon)) / tf.reduce_sum(w)
            return cross_entropy + l2_loss

    state = State(model=my_model())
    state.epoch = 0
    state.avg_costs = []  # keeps track of the avg costs per batch over an epoch

    return state


def partial_fit(state, X, S, y, w, epoch, batch):
    """
    Pass batches of received data to tensorflow
    """
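    # repeat each input column five times to match the widened network (see get_model)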
    X = np.repeat(X, 5, axis=1)

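    # one optimization step: evaluate the weighted loss under a gradient tape
    # and let Adam apply the resulting gradients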
    with tf.GradientTape() as tape:
        avg_cost = state.model.loss(state.model(X), y, w)
        grads = tape.gradient(avg_cost, state.model.trainable_variables)

    state.model.optimizer.apply_gradients(zip(grads, state.model.trainable_variables))

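    # average the cost over the epoch; state.nBatches (batches per epoch,
    # set by the calling MVA framework) marks the last batch of an epoch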
    if batch == 0 and epoch == 0:
        state.avg_costs = [avg_cost]
    elif batch != state.nBatches - 1:
        state.avg_costs.append(avg_cost)
    else:
        # end of the epoch, print summary results, reset the avg_costs and update the counter
        print(f"Epoch: {epoch:04d} cost= {np.mean(state.avg_costs):.9f}")
        state.avg_costs = [avg_cost]

    if epoch == 100000:
        return False
    return True


def apply(state, X):
    """
    Apply estimator to passed data.
    """
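    # apply the same five-fold column repetition as in training, then flatten
    # the (N, 1) network output to a contiguous 1-D float32 array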
    X = np.repeat(X, 5, axis=1)
    r = state.model(X).numpy().flatten()
    return np.require(r, dtype=np.float32, requirements=['A', 'W', 'C', 'O'])


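# A hypothetical standalone check (illustration only, not part of the training
# workflow below): build an untrained state and score random data with apply().
#   state = get_model(28, 0, 1000, 0.5, None)
#   scores = apply(state, np.random.rand(10, 28).astype(np.float32))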
if __name__ == "__main__":
    from basf2 import conditions, find_file
    # NOTE: do not use testing payloads in production! Any results obtained like this WILL NOT BE PUBLISHED
    conditions.testing_payloads = [
        'localdb/database.txt'
    ]
    train_file = find_file("mva/train_D0toKpipi.root", "examples")
    test_file = find_file("mva/test_D0toKpipi.root", "examples")

    training_data = basf2_mva.vector(train_file)
    testing_data = basf2_mva.vector(test_file)

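    # General options: training data, weightfile identifier, tree name,
    # input variables and the training target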
    general_options = basf2_mva.GeneralOptions()
    general_options.m_datafiles = training_data
    general_options.m_identifier = "Tensorflow"
    general_options.m_treename = "tree"
    variables = ['M', 'p', 'pt', 'pz',
                 'daughter(0, p)', 'daughter(0, pz)', 'daughter(0, pt)',
                 'daughter(1, p)', 'daughter(1, pz)', 'daughter(1, pt)',
                 'daughter(2, p)', 'daughter(2, pz)', 'daughter(2, pt)',
                 'chiProb', 'dr', 'dz',
                 'daughter(0, dr)', 'daughter(1, dr)',
                 'daughter(0, dz)', 'daughter(1, dz)',
                 'daughter(0, chiProb)', 'daughter(1, chiProb)', 'daughter(2, chiProb)',
                 'daughter(0, kaonID)', 'daughter(0, pionID)',
                 'daughterInvM(0, 1)', 'daughterInvM(0, 2)', 'daughterInvM(1, 2)']
    general_options.m_variables = basf2_mva.vector(*variables)
    general_options.m_target_variable = "isSignal"

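    # Python-specific options: this very file is the steering file executed by
    # the mva package; inputs are normalized and trained for 100 epochs
    # (m_nIterations) in mini-batches of 500 events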
    specific_options = basf2_mva.PythonOptions()
    specific_options.m_framework = "tensorflow"
    specific_options.m_steering_file = 'mva/examples/tensorflow/simple_deep.py'
    specific_options.m_normalize = True
    specific_options.m_nIterations = 100
    specific_options.m_mini_batch_size = 500

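    # Train with the teacher process, then time inference with the trained expert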
    training_start = time.time()
    basf2_mva.teacher(general_options, specific_options)
    training_stop = time.time()
    training_time = training_stop - training_start
    method = basf2_mva_util.Method(general_options.m_identifier)
    inference_start = time.time()
    p, t = method.apply_expert(testing_data, general_options.m_treename)
    inference_stop = time.time()
    inference_time = inference_stop - inference_start
    auc = basf2_mva_util.calculate_auc_efficiency_vs_background_retention(p, t)
    print("Tensorflow", training_time, inference_time, auc)