import warnings

import numpy as np

from tensorflow.keras.layers import Input, Dense, Concatenate, Dropout, Lambda, GlobalAveragePooling1D, Reshape
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import binary_crossentropy, sparse_categorical_crossentropy
from tensorflow.keras.activations import sigmoid, tanh, softmax
from tensorflow.keras.callbacks import EarlyStopping, Callback

from basf2_mva_extensions.keras_relational import EnhancedRelations
from basf2_mva_extensions.preprocessing import fast_equal_frequency_binning

# NOTE(review): `State` and `calculate_auc_efficiency_vs_background_retention`
# are referenced further down but their imports are not visible in this chunk —
# confirm they are imported in the full file (presumably from the basf2_mva
# python keras interface / evaluation utilities).

# Silence noisy keras/tensorflow UserWarnings during training.
warnings.filterwarnings('ignore', category=UserWarning)
def slice(input, begin, end):
    """
    Simple function for slicing feature in tensors.

    Used as the callable of keras ``Lambda`` layers below, so the name
    (which shadows the ``slice`` builtin) and the positional ``input``
    parameter must stay as they are.

    :param input: 2-D batch tensor/array (rows are events, columns features).
    :param begin: First feature column to keep (inclusive).
    :param end: Last feature column (exclusive).
    :return: ``input`` restricted to columns ``[begin, end)``.
    """
    return input[:, begin:end]
class batch_generator():
    """
    Class to create batches for training the Adversary Network.
    See mva/examples/keras/adversary_network.py for details.
    """

    def __init__(self, X, Y, Z):
        """
        Init the class.

        :param X: Input Features
        :param Y: Label Data
        :param Z: Spectaters/Qunatity to be uncorrelated to
        """
        #: Input features
        self.X = X
        #: Label data
        self.Y = Y
        #: Spectator quantities
        self.Z = Z
        #: Number of training events
        self.len = len(Y)
        #: Index array, which will be shuffled.
        self.index_array = np.arange(self.len)
        np.random.shuffle(self.index_array)
        #: Pointer for index array.
        self.pointer = 0

    def next_batch(self, batch_size):
        """
        Getting next batch of training data.

        When fewer than ``batch_size`` events remain in the current pass,
        the index array is reshuffled and the pointer restarts at zero,
        so every batch has exactly ``batch_size`` events.

        :param batch_size: Number of events to return.
        :return: Tuple ``(X, Y, Z)`` slices for the drawn indices.
        """
        if self.pointer + batch_size >= self.len:
            np.random.shuffle(self.index_array)
            self.pointer = 0

        batch_index = self.index_array[self.pointer:self.pointer + batch_size]
        self.pointer += batch_size

        return self.X[batch_index], self.Y[batch_index], self.Z[batch_index]
def get_model(number_of_features, number_of_spectators, number_of_events, training_fraction, parameters):
    """
    Build the keras model for training.

    Builds a binary classifier (optionally with relational layers on the
    low-level inputs) and, per spectator and per class hypothesis, an
    adversary network that tries to predict the spectator bin from the
    classifier output.  The adversary losses enter the combined model with
    negative weight ``-lambda`` so the classifier is penalised for carrying
    spectator information.

    :param number_of_features: Number of input features.
    :param number_of_spectators: Number of spectator variables to decorrelate against.
    :param number_of_events: Unused here (part of the fixed mva hook interface).
    :param training_fraction: Unused here (part of the fixed mva hook interface).
    :param parameters: dict overriding the defaults 'use_relation_layers',
        'lambda', 'number_bins', 'adversary_steps'.
    :return: State object holding the compiled models.
    """
    def adversary_loss(signal):
        """
        Loss for adversaries outputs.

        :param signal: If signal or background distribution should be learned.
        :return: Loss function for the discriminator part of the Network.
        """
        back_constant = 0 if signal else 1

        def adv_loss(y, p):
            # y[:, 0] is the isSignal flag: the factor (y[:, 0] - back_constant)
            # switches the loss off for events of the other class.
            return (y[:, 0] - back_constant) * sparse_categorical_crossentropy(y[:, 1:], p)
        return adv_loss

    # Defaults; user-supplied `parameters` override them.
    param = {'use_relation_layers': False, 'lambda': 0, 'number_bins': 10, 'adversary_steps': 5}
    if isinstance(parameters, dict):
        param.update(parameters)

    input_layer = Input((number_of_features,))

    if param['use_relation_layers']:
        # Hard-coded feature layout: columns [0, 340) = 20 tracks x 17 variables,
        # [340, 560) = 20 clusters x 11 variables, [560, 590) = high-level
        # variables.  NOTE(review): confirm this matches the trained variable list.
        low_level_input = Lambda(slice, arguments={'begin': 0, 'end': 560})(input_layer)
        high_level_input = Lambda(slice, arguments={'begin': 560, 'end': 590})(input_layer)
        relations_tracks = Lambda(slice, arguments={'begin': 0, 'end': 340})(low_level_input)
        relations_tracks = Reshape((20, 17))(relations_tracks)
        relations_clusters = Lambda(slice, arguments={'begin': 340, 'end': 560})(low_level_input)
        relations_clusters = Reshape((20, 11))(relations_clusters)

        relations1 = EnhancedRelations(number_features=20,
                                       hidden_feature_shape=[80, 80, 80])([relations_tracks, high_level_input])
        relations2 = EnhancedRelations(number_features=20,
                                       hidden_feature_shape=[80, 80, 80])([relations_clusters, high_level_input])

        relations_output1 = GlobalAveragePooling1D()(relations1)
        relations_output2 = GlobalAveragePooling1D()(relations2)

        net = Concatenate()([relations_output1, relations_output2])

        net = Dense(units=100, activation=tanh)(net)
        net = Dropout(0.5)(net)
        net = Dense(units=100, activation=tanh)(net)
        net = Dropout(0.5)(net)
    else:
        # Plain MLP when relation layers are disabled.
        net = Dense(units=50, activation=tanh)(input_layer)
        net = Dense(units=50, activation=tanh)(net)
        net = Dense(units=50, activation=tanh)(net)

    output = Dense(units=1, activation=sigmoid)(net)

    # Model used to apply the trained classifier alone (no adversaries).
    apply_model = Model(input_layer, output)
    apply_model.compile(optimizer=Adam(), loss=binary_crossentropy, metrics=['accuracy'])

    state = State(apply_model,
                  use_adv=param['lambda'] > 0 and number_of_spectators > 0,
                  preprocessor_state=None)

    # One adversary chain per spectator, separately for the signal and the
    # background distribution.  They start non-trainable: model1 trains the
    # classifier against frozen adversaries, model2 below flips trainability
    # to train only the adversaries.
    adversaries, adversary_losses_model = [], []
    for mode in ['signal', 'background']:
        for i in range(number_of_spectators):
            adversary1 = Dense(units=2 * param['number_bins'], activation=tanh, trainable=False)(output)
            adversary2 = Dense(units=2 * param['number_bins'], activation=tanh, trainable=False)(adversary1)
            adversaries.append(Dense(units=param['number_bins'], activation=softmax, trainable=False)(adversary2))

            adversary_losses_model.append(adversary_loss(mode == 'signal'))

    model1 = Model(input_layer, [output] + adversaries)
    # Use the defaulted `param` dict (not raw `parameters`) so the declared
    # defaults actually apply when a key is missing.
    model1.compile(optimizer=Adam(),
                   loss=[binary_crossentropy] + adversary_losses_model, metrics=['accuracy'],
                   loss_weights=[1] + [-param['lambda']] * len(adversary_losses_model))

    # Adversary-only model: invert every layer's trainability so that here
    # only the adversaries are updated.
    model2 = Model(input_layer, adversaries)
    for layer in model2.layers:
        layer.trainable = not layer.trainable

    model2.compile(optimizer=Adam(), loss=adversary_losses_model,
                   metrics=['accuracy'])

    state.forward_model, state.adv_model = model1, model2
    state.K = param['adversary_steps']
    state.number_bins = param['number_bins']

    return state
def begin_fit(state, Xtest, Stest, ytest, wtest, nBatches):
    """
    Save Validation Data for monitoring Training.

    ``partial_fit`` reads ``state.Xtest``, ``state.Stest`` and ``state.ytest``,
    so they are stored here.  ``wtest`` and ``nBatches`` are part of the fixed
    hook interface and are not used.

    :return: The updated state object.
    """
    state.Xtest = Xtest
    state.Stest = Stest
    state.ytest = ytest

    return state
def partial_fit(state, X, S, y, w, epoch, batch):
    """
    For every training step of MLP. Adverserial Network (if used) will be trained K times.

    Applies equal-frequency binning to the features (and, when the adversary is
    used, to the spectators), then fits either the plain classifier or the
    combined classifier+adversary model.

    :param state: State object created in ``get_model``/``begin_fit``.
    :param X: Training features.
    :param S: Spectator quantities.
    :param y: Labels.
    :param w: Event weights (unused here, fixed hook interface).
    :param epoch: Current epoch (unused, fixed hook interface).
    :param batch: Current batch (unused, fixed hook interface).
    """
    # Preprocess the features and keep the preprocessor state so `apply`
    # can reproduce the identical transformation at inference time.
    preprocessor = fast_equal_frequency_binning()
    preprocessor.fit(X, number_of_bins=500)
    X = preprocessor.apply(X)
    state.Xtest = preprocessor.apply(state.Xtest)
    state.preprocessor_state = preprocessor.export_state()

    def build_adversary_target(p_y, p_s):
        """
        Concat isSignal and spectator bins, because both are target information for the adversary.
        """
        # One target per spectator column, duplicated for the signal and the
        # background adversaries ("* 2").
        return [np.concatenate((p_y, i), axis=1) for i in np.split(p_s, len(p_s[0]), axis=1)] * 2

    if state.use_adv:
        # Bin the spectators; scaling by number_bins turns the [0, 1] output of
        # the binning into integer-like bin indices for the categorical loss.
        S_preprocessor = fast_equal_frequency_binning()
        S_preprocessor.fit(S, number_of_bins=state.number_bins)
        S = S_preprocessor.apply(S) * state.number_bins
        state.Stest = S_preprocessor.apply(state.Stest) * state.number_bins

        target_array = build_adversary_target(y, S)
        target_val_array = build_adversary_target(state.ytest, state.Stest)

        # Generator feeding the adversary-only training steps.
        state.batch_gen = batch_generator(X, y, S)

    class AUC_Callback(Callback):
        """
        Callback to print AUC after every epoch.
        """

        def on_train_begin(self, logs=None):
            self.val_aucs = []

        def on_epoch_end(self, epoch, logs=None):
            val_y_pred = state.model.predict(state.Xtest).flatten()
            # NOTE(review): helper imported elsewhere in the full file.
            val_auc = calculate_auc_efficiency_vs_background_retention(val_y_pred, state.ytest)
            print(f'\nTest AUC: {val_auc}\n')
            self.val_aucs.append(val_auc)

    class Adversary(Callback):
        """
        Callback to train Adversary
        """

        def on_batch_end(self, batch, logs=None):
            # Train the adversary K batches for every classifier batch.
            v_X, v_y, v_S = state.batch_gen.next_batch(500 * state.K)
            target_adversary = build_adversary_target(v_y, v_S)
            state.adv_model.fit(v_X, target_adversary, verbose=0, batch_size=500)

    if not state.use_adv:
        state.model.fit(X, y, batch_size=500, epochs=100000, validation_data=(state.Xtest, state.ytest),
                        callbacks=[EarlyStopping(monitor='val_loss', patience=10, mode='min'), AUC_Callback()])
    else:
        state.forward_model.fit(X, [y] + target_array, batch_size=500, epochs=100000,
                                callbacks=[EarlyStopping(monitor='val_loss', patience=10, mode='min'),
                                           AUC_Callback(), Adversary()],
                                validation_data=(state.Xtest, [state.ytest] + target_val_array))
    # Returning False tells the mva interface not to call partial_fit again;
    # keras handles the epoch loop internally via EarlyStopping.
    return False
def apply(state, X):
    """
    Apply estimator to passed data.
    Has to be overwritten, because also the expert has to apply preprocessing.

    :param state: State object with the trained model and the saved
        preprocessor state from training.
    :param X: Features to classify.
    :return: float32 numpy array of classifier outputs, one per event.
    """
    # Rebuild the preprocessor exactly as it was fitted during training.
    preprocessor = fast_equal_frequency_binning(state.preprocessor_state)
    X = preprocessor.apply(X)

    r = state.model.predict(X).flatten()
    # Enforce an aligned, writeable, C-contiguous float32 array as required
    # by the mva expert interface.
    return np.require(r, dtype=np.float32, requirements=['A', 'W', 'C', 'O'])
# --- Doxygen member-stub residue from extraction, preserved as comments: ---
# def calculate_auc_efficiency_vs_background_retention(p, t, w=None)
# pointer: Pointer for index array.
# def __init__(self, X, Y, Z)
# index_array: Index array, which will be shuffled.
# def next_batch(self, batch_size)