import numpy as np
import tensorflow as tf
import basf2_mva
import basf2_mva_util
import time

# State container from the basf2 MVA python interface (import path as used by
# the other basf2 tensorflow examples)
from basf2_mva_python_interface.tensorflow import State

old_time = time.time()
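
# The basf2 MVA python interface calls get_model to construct the graph and
# expects a State object wrapping the session and the relevant tensors back.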
def get_model(number_of_features, number_of_spectators, number_of_events, training_fraction, parameters):

    # The input is widened by a factor of 20, matching the
    # np.repeat(..., 20, axis=1) applied to each batch in partial_fit below.
    number_of_features *= 20
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, [None, number_of_features])
    y = tf.placeholder(tf.float32, [None, 1])
    def layer(x, shape, name, unit=tf.sigmoid):
        # Fully connected layer; its L2 weight decay is added to the 'losses' collection
        with tf.name_scope(name) as scope:
            weights = tf.Variable(tf.truncated_normal(shape, stddev=1.0 / np.sqrt(float(shape[0]))), name='weights')
            biases = tf.Variable(tf.constant(0.0, shape=[shape[1]]), name='biases')
            weight_decay = tf.reduce_sum(0.001 * tf.nn.l2_loss(weights))
            tf.add_to_collection('losses', weight_decay)
            layer = unit(tf.matmul(x, weights) + biases)
        return layer
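
    # Stack 15 equally wide sigmoid hidden layers and a single sigmoid output unit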
    inference_hidden1 = layer(x, [number_of_features, number_of_features], 'inference_hidden1')
    inference_hidden2 = layer(inference_hidden1, [number_of_features, number_of_features], 'inference_hidden2')
    inference_hidden3 = layer(inference_hidden2, [number_of_features, number_of_features], 'inference_hidden3')
    inference_hidden4 = layer(inference_hidden3, [number_of_features, number_of_features], 'inference_hidden4')
    inference_hidden5 = layer(inference_hidden4, [number_of_features, number_of_features], 'inference_hidden5')
    inference_hidden6 = layer(inference_hidden5, [number_of_features, number_of_features], 'inference_hidden6')
    inference_hidden7 = layer(inference_hidden6, [number_of_features, number_of_features], 'inference_hidden7')
    inference_hidden8 = layer(inference_hidden7, [number_of_features, number_of_features], 'inference_hidden8')
    inference_hidden9 = layer(inference_hidden8, [number_of_features, number_of_features], 'inference_hidden9')
    inference_hidden10 = layer(inference_hidden9, [number_of_features, number_of_features], 'inference_hidden10')
    inference_hidden11 = layer(inference_hidden10, [number_of_features, number_of_features], 'inference_hidden11')
    inference_hidden12 = layer(inference_hidden11, [number_of_features, number_of_features], 'inference_hidden12')
    inference_hidden13 = layer(inference_hidden12, [number_of_features, number_of_features], 'inference_hidden13')
    inference_hidden14 = layer(inference_hidden13, [number_of_features, number_of_features], 'inference_hidden14')
    inference_hidden15 = layer(inference_hidden14, [number_of_features, number_of_features], 'inference_hidden15')
    inference_activation = layer(inference_hidden15, [number_of_features, 1], 'inference_sigmoid', unit=tf.sigmoid)

    epsilon = 1e-5  # small constant keeping the logarithms finite (assumed value)
    # Binary cross-entropy plus the collected L2 weight decay terms
    inference_loss = (-tf.reduce_sum(y * tf.log(inference_activation + epsilon) +
                      (1.0 - y) * tf.log(1 - inference_activation + epsilon)) +
                      tf.reduce_sum(tf.get_collection('losses')))

    inference_optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    inference_minimize = inference_optimizer.minimize(inference_loss)

    init = tf.global_variables_initializer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    session.run(init)

    state = State(x, y, inference_activation, inference_loss, inference_minimize, session)
    return state
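

# partial_fit is called by the basf2 MVA python interface with the training
# data (features X, spectators S, targets y, weights w) and the current epoch.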
def partial_fit(state, X, S, y, w, epoch):
    """
    Pass received data to tensorflow session
    """
    batch_size = 100  # assumed batch size
    indices = np.arange(len(X))
    np.random.shuffle(indices)
    for pos in range(0, len(indices), batch_size):
        # Skip the last, incomplete batch
        if pos + batch_size >= len(indices):
            break
        index = indices[pos: pos + batch_size]
        # Repeat each feature 20 times to match the widened network input
        x_batch = np.repeat(X[index], 20, axis=1)
        y_batch = y[index]

        feed_dict = {state.x: x_batch, state.y: y_batch}
        state.session.run(state.optimizer, feed_dict=feed_dict)
    # Report the cost of the last processed batch
    avg_cost = state.session.run(state.cost, feed_dict=feed_dict)
    new_time = time.time()
    print("Time Difference", new_time - old_time)
    print("Epoch:", '%04d' % (epoch), "cost=", "{:.9f}".format(avg_cost))
    # A true return value tells the basf2 MVA interface to continue training
    return True


if __name__ == "__main__":
    from basf2 import conditions
    # Read weight-file payloads from a local test database
    conditions.testing_payloads = [
        'localdb/database.txt'
    ]

    general_options = basf2_mva.GeneralOptions()
    general_options.m_datafiles = basf2_mva.vector("train.root")
    general_options.m_identifier = "Tensorflow"
    general_options.m_treename = "tree"
    variables = ['M', 'p', 'pt', 'pz',
                 'daughter(0, p)', 'daughter(0, pz)', 'daughter(0, pt)',
                 'daughter(1, p)', 'daughter(1, pz)', 'daughter(1, pt)',
                 'daughter(2, p)', 'daughter(2, pz)', 'daughter(2, pt)',
                 'chiProb', 'dr', 'dz',
                 'daughter(0, dr)', 'daughter(1, dr)',
                 'daughter(0, dz)', 'daughter(1, dz)',
                 'daughter(0, chiProb)', 'daughter(1, chiProb)', 'daughter(2, chiProb)',
                 'daughter(0, kaonID)', 'daughter(0, pionID)',
                 'daughterInvariantMass(0, 1)', 'daughterInvariantMass(0, 2)', 'daughterInvariantMass(1, 2)']
    general_options.m_variables = basf2_mva.vector(*variables)
    general_options.m_target_variable = "isSignal"

    specific_options = basf2_mva.PythonOptions()
    specific_options.m_framework = "tensorflow"
    specific_options.m_steering_file = 'mva/examples/tensorflow/simple_deep.py'
    specific_options.m_normalize = True

    training_start = time.time()
    basf2_mva.teacher(general_options, specific_options)
    training_stop = time.time()
    training_time = training_stop - training_start

    # Load the trained weight file back as an expert; Method and
    # calculate_roc_auc are assumed basf2_mva_util helpers, as used by the
    # other basf2 MVA examples.
    method = basf2_mva_util.Method(general_options.m_identifier)
    inference_start = time.time()
    test_data = ["test.root"] * 10
    p, t = method.apply_expert(basf2_mva.vector(*test_data), general_options.m_treename)
    inference_stop = time.time()
    inference_time = inference_stop - inference_start
    auc = basf2_mva_util.calculate_roc_auc(p, t)

    print("Tensorflow", training_time, inference_time, auc)