import numpy as np
import tensorflow as tf
import basf2_mva
import basf2_mva_util
import time

from basf2_mva_python_interface.tensorflow import State
 
def get_model(number_of_features, number_of_spectators, number_of_events, training_fraction, parameters):
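    """
    Return the network wrapped in a State object.

    This is the model-construction hook of the basf2_mva Python interface;
    the framework calls it once, before the first batch of training data
    arrives, and later hands the returned State to partial_fit and apply.
    """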
 
    # partial_fit and apply repeat every input column five times (np.repeat),
    # so the network has to expect five times the declared feature count.
    number_of_features *= 5
 
    # Train on a GPU if one is available; memory growth keeps TensorFlow from
    # reserving all device memory up front.
    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
 
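    # The network is a plain tf.Module with hand-managed weight and bias
    # variables instead of Keras layers, which keeps every trainable tensor
    # explicitly listed in model.trainable_variables for the gradient tape
    # used in partial_fit below.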
    class my_model(tf.Module):

        def __init__(self, **kwargs):
            super().__init__(**kwargs)

            self.optimizer = tf.optimizers.Adam(0.01)
 
            def create_layer_variables(shape, name, activation_function):
                # One fully connected layer = (weights, biases, activation),
                # with truncated-normal weights scaled by 1/sqrt(fan-in).
                weights = tf.Variable(
                    tf.random.truncated_normal(shape, stddev=1.0 / np.sqrt(float(shape[0]))),
                    name=f'{name}_weights')
                biases = tf.Variable(tf.zeros(shape=[shape[1]]), name=f'{name}_biases')
                return weights, biases, activation_function
 
            # NOTE: the network depth was lost from this excerpt; ten layers
            # is an assumed value, not necessarily the original one.
            self.n_layers = 10
            self.layer_variables = []

            # n_layers - 1 equally wide relu hidden layers, then one sigmoid
            # output node squashing everything into a [0, 1] signal score.
            shape = [number_of_features, number_of_features]
            for i in range(self.n_layers - 1):
                self.layer_variables.append(create_layer_variables(shape, f'inference_hidden{i}', tf.nn.relu))
            self.layer_variables.append(create_layer_variables([number_of_features, 1], 'inference_sigmoid', tf.nn.sigmoid))
 
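        # The input_signature pins the input dtype and feature count, so
        # tf.function traces a single graph up front instead of retracing
        # for every distinct batch size.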
        @tf.function(input_signature=[tf.TensorSpec(shape=[None, number_of_features], dtype=tf.float32)])
        def __call__(self, x):

            def dense(x, W, b, activation_function):
                return activation_function(tf.matmul(x, W) + b)

            for i in range(self.n_layers):
                x = dense(x, *self.layer_variables[i])
            return x
 
        def loss(self, predicted_y, target_y, w):
            # NOTE: both constants were lost from this excerpt; the values
            # below are assumptions.
            lam = 1e-15     # L2 regularization strength (assumed)
            epsilon = 1e-5  # guard against log(0) (assumed)

            # Penalize only the hidden-layer weights; biases and the sigmoid
            # output layer are excluded by name.
            l2_loss = lam * tf.math.add_n([tf.nn.l2_loss(n) for n in self.trainable_variables
                                           if '_weights' in n.name and 'sigmoid' not in n.name])

            # Weighted binary cross entropy: take p for signal events and
            # 1 - p for background, then average the negative log likelihood.
            diff_from_truth = tf.where(target_y == 1., predicted_y, 1. - predicted_y)
            cross_entropy = - tf.reduce_sum(w * tf.math.log(diff_from_truth + epsilon)) / tf.reduce_sum(w)
            return cross_entropy + l2_loss
 
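    # A quick sanity check of the loss arithmetic above (hypothetical numbers,
    # ignoring the epsilon guard and the tiny l2 term):
    #   predicted_y = [0.9, 0.2], target_y = [1., 0.], w = [1., 1.]
    #   diff_from_truth = [0.9, 0.8]
    #   cross_entropy = -(log(0.9) + log(0.8)) / 2 ≈ 0.164

    # State is the container object of the basf2_mva Python interface;
    # whatever is attached to it survives between the partial_fit calls.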
    state = State(model=my_model())
    return state
 
def partial_fit(state, X, S, y, w, epoch, batch):
    """
    Pass batches of received data to TensorFlow.
    """
    X = np.repeat(X, 5, axis=1)
 
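    # One optimizer step per received batch: run the forward pass under a
    # gradient tape, then let Adam apply the resulting gradients.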
    with tf.GradientTape() as tape:
        avg_cost = state.model.loss(state.model(X), y, w)
        grads = tape.gradient(avg_cost, state.model.trainable_variables)
 
    state.model.optimizer.apply_gradients(zip(grads, state.model.trainable_variables))
 
    if batch == 0 and epoch == 0:
        state.avg_costs = [avg_cost]
    elif batch != state.nBatches - 1:
        state.avg_costs.append(avg_cost)
    else:
        # Last batch of the epoch: print a summary and reset the running list.
        print(f"Epoch: {epoch:04d} cost= {np.mean(state.avg_costs):.9f}")
        state.avg_costs = [avg_cost]

    # True asks basf2_mva for more batches; returning False would end training.
    return True
 
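# Inference hook of the basf2_mva Python interface: called by the expert with
# the feature matrix of the events to classify.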
def apply(state, X):
    """
    Apply estimator to passed data.
    """
    X = np.repeat(X, 5, axis=1)
 
    r = state.model(X).numpy().flatten()
 
    # basf2 expects an aligned, writeable, C-contiguous, own-data float32 array.
    return np.require(r, dtype=np.float32, requirements=['A', 'W', 'C', 'O'])
 
if __name__ == "__main__":
    from basf2 import conditions
    # NOTE: testing payloads are for local development only; results obtained
    # with them are not suitable for publication.
    conditions.testing_payloads = [
        'localdb/database.txt'
    ]
    general_options = basf2_mva.GeneralOptions()
    general_options.m_datafiles = basf2_mva.vector("train.root")
    general_options.m_identifier = "Tensorflow"
    general_options.m_treename = "tree"
    variables = ['M', 'p', 'pt', 'pz',
                 'daughter(0, p)', 'daughter(0, pz)', 'daughter(0, pt)',
                 'daughter(1, p)', 'daughter(1, pz)', 'daughter(1, pt)',
                 'daughter(2, p)', 'daughter(2, pz)', 'daughter(2, pt)',
                 'chiProb', 'dr', 'dz',
                 'daughter(0, dr)', 'daughter(1, dr)',
                 'daughter(0, dz)', 'daughter(1, dz)',
                 'daughter(0, chiProb)', 'daughter(1, chiProb)', 'daughter(2, chiProb)',
                 'daughter(0, kaonID)', 'daughter(0, pionID)',
                 'daughterInvM(0, 1)', 'daughterInvM(0, 2)', 'daughterInvM(1, 2)']
 
    general_options.m_variables = basf2_mva.vector(*variables)
 
    general_options.m_target_variable = "isSignal"

    # The Python backend re-imports this very steering file and calls the
    # get_model / partial_fit / apply hooks defined above.
    specific_options = basf2_mva.PythonOptions()
    specific_options.m_framework = "tensorflow"
    specific_options.m_steering_file = 'mva/examples/tensorflow/simple_deep.py'
    specific_options.m_normalize = True
    specific_options.m_nIterations = 100
    specific_options.m_mini_batch_size = 500
 
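    # Train the method; basf2_mva.teacher streams train.root through the
    # partial_fit hook above in mini-batches of 500 for 100 iterations.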
    training_start = time.time()
    basf2_mva.teacher(general_options, specific_options)
    training_stop = time.time()
    training_time = training_stop - training_start
 
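    # Benchmark inference by applying the freshly trained method to ten
    # copies of the test file.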
    method = basf2_mva_util.Method(general_options.m_identifier)
    inference_start = time.time()
    test_data = ["test.root"] * 10
    p, t = method.apply_expert(basf2_mva.vector(*test_data), general_options.m_treename)
    inference_stop = time.time()
    inference_time = inference_stop - inference_start
 
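    # Figure of merit: area under the efficiency vs. background-retention curve.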
    auc = basf2_mva_util.calculate_auc_efficiency_vs_background_retention(p, t)
    print("Tensorflow", training_time, inference_time, auc)