keras_relational.py
#!/usr/bin/env python3

from tensorflow.keras.layers import Layer, Reshape
from tensorflow.keras import activations
from tensorflow.keras.activations import sigmoid, tanh
from tensorflow.keras import backend as K
import numpy as np
import math


class Relations(Layer):
    """
    This class implements a Relational Layer in Keras.
    A Relational Layer compares every combination of two feature groups with shared weights.
    Use this class like any other layer in Keras.
    Relevant paper: https://arxiv.org/abs/1706.01427
    RN(O) = f_phi(sum(g_theta(o_i, o_j)))
    For flexibility reasons only the part g_theta(o_i, o_j) is modelled here;
    f_phi corresponds to an MLP net.
    To pool over all pairwise combinations, use GlobalAveragePooling1D from Keras.
    """

    def __init__(self, number_features, hidden_feature_shape=[30, 30, 30, 30], activation=tanh, **kwargs):
        """
        Init class.
        """
        #: Number of features.
        self.number_features = number_features
        #: Number of groups in input.
        self.number_groups = 0
        #: shape of hidden layers used for extracting relations
        self.hidden_feature_shape = hidden_feature_shape
        #: activation used for hidden layer in shared weights.
        self.activation = activations.get(activation)
        #: how many neurons has one comparable object
        self.group_len = 0
        #: saves weights for call
        self.weightvariables = []
        #: number of relation combinations
        self.combinations = 0

        super().__init__(**kwargs)

    def build(self, input_shape):
        """
        Build all weights for the Relations layer.
        :param input_shape: Input shape of tensor
        :return: Nothing
        """
        # accept only 3D input: (batch, number_groups, group_len)
        assert len(input_shape) == 3

        self.number_groups = input_shape[1]
        self.group_len = input_shape[2]

        # number of unordered group pairs: n choose 2
        self.combinations = np.int32(math.factorial(self.number_groups) / (2 * math.factorial(self.number_groups - 2)))

        # layer sizes of the shared MLP g_theta
        dense_shape = [2 * self.group_len] + self.hidden_feature_shape + [self.number_features]

        for i in range(len(dense_shape[:-1])):
            weights = self.add_weight(name=f'relation_weights_{i}',
                                      shape=list(dense_shape[i:i + 2]), initializer='glorot_uniform', trainable=True)
            bias = self.add_weight(name=f'relation_bias_{i}',
                                   shape=(dense_shape[i + 1],), initializer='zeros', trainable=True)

            self.weightvariables.append([weights, bias])

        super().build(input_shape)

    def call(self, inputs):
        """
        Compute the relational layer.
        :param inputs: input tensor
        :return: output tensor
        """
        input_groups = [inputs[:, i, :] for i in range(self.number_groups)]
        outputs = []
        for index, group1 in enumerate(input_groups[:-1]):
            for group2 in input_groups[index + 1:]:
                # apply the shared MLP g_theta to the concatenated pair
                net = K.dot(K.concatenate([group1, group2]), self.weightvariables[0][0])
                net = K.bias_add(net, self.weightvariables[0][1])
                for variables in self.weightvariables[1:]:
                    net = self.activation(net)
                    net = K.dot(net, variables[0])
                    net = K.bias_add(net, variables[1])
                outputs.append(sigmoid(net))

        flat_result = K.concatenate(outputs)
        return Reshape((self.combinations, self.number_features,))(flat_result)

    def compute_output_shape(self, input_shape):
        """
        Compute output shape.
        :param input_shape: Input shape of tensor
        :return: Output shape
        """
        # accept only 3D input: (batch, number_groups, group_len)
        assert len(input_shape) == 3

        self.combinations = np.int32(math.factorial(self.number_groups) / (2 * math.factorial(self.number_groups - 2)))

        return (input_shape[0], self.combinations, self.number_features)

    def get_config(self):
        """
        Config required for saving parameters in keras model.
        """
        config = {
            'number_features': self.number_features,
            'hidden_feature_shape': self.hidden_feature_shape,
            'activation': activations.serialize(self.activation)
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))

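# A minimal usage sketch for Relations (the input shapes and layer sizes
# below are illustrative assumptions, not prescribed by this module). The
# layer expects a 3D input (batch, number_groups, group_len); its pairwise
# outputs are typically pooled with GlobalAveragePooling1D, as recommended
# in the class docstring:
#
#     from tensorflow.keras.layers import Input, GlobalAveragePooling1D, Dense
#     from tensorflow.keras.models import Model
#
#     groups = Input(shape=(5, 4))                  # 5 objects, 4 features each
#     rel = Relations(number_features=10)(groups)   # -> (batch, 10 pairs, 10 features)
#     pooled = GlobalAveragePooling1D()(rel)        # pool over all pairs
#     out = Dense(1, activation='sigmoid')(pooled)
#     model = Model(inputs=groups, outputs=out)

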
class EnhancedRelations(Layer):
    """
    This class implements a Relational Layer in Keras.
    See the Relations class for details.
    EnhancedRelations uses an additional input for passing event information to every comparison:
    RN(O) = f_phi(sum(g_theta(o_i, o_j, q)))
    q is fed in as a second, one-dimensional input.
    """

    def __init__(self, number_features, hidden_feature_shape=[30, 30, 30, 30], activation=tanh, **kwargs):
        """
        Init class.
        """
        #: Number of features.
        self.number_features = number_features
        #: Number of groups in input.
        self.number_groups = 0
        #: shape of hidden layers used for extracting relations
        self.hidden_feature_shape = hidden_feature_shape
        #: activation used for hidden layer in shared weights.
        self.activation = activations.get(activation)
        #: how many neurons has one comparable object
        self.group_len = 0
        #: saves weights for call
        self.weightvariables = []
        #: size of second input vector
        self.question_len = 0
        #: number of relation combinations
        self.combinations = 0

        super().__init__(**kwargs)

    def build(self, input_shape):
        """
        Build all weights for the EnhancedRelations layer.
        :param input_shape: Input shape of tensor
        :return: Nothing
        """
        # accept exactly 2 inputs
        assert len(input_shape) == 2
        # first input must be 3D: (batch, number_groups, group_len)
        assert len(input_shape[0]) == 3
        # second input must be 2D: (batch, question_len)
        assert len(input_shape[1]) == 2

        self.number_groups = input_shape[0][1]
        self.group_len = input_shape[0][2]
        self.question_len = input_shape[1][1]

        # number of unordered group pairs: n choose 2
        self.combinations = np.int32(math.factorial(self.number_groups) / (2 * math.factorial(self.number_groups - 2)))

        # layer sizes of the shared MLP g_theta; the question vector is appended to every pair
        dense_shape = [2 * self.group_len + self.question_len] + self.hidden_feature_shape + [self.number_features]

        for i in range(len(dense_shape[:-1])):
            weights = self.add_weight(name=f'relation_weights_{i}',
                                      shape=list(dense_shape[i:i + 2]), initializer='glorot_uniform', trainable=True)
            bias = self.add_weight(name=f'relation_bias_{i}',
                                   shape=(dense_shape[i + 1],), initializer='zeros', trainable=True)

            self.weightvariables.append([weights, bias])

        super().build(input_shape)

    def call(self, inputs):
        """
        Compute the relational layer.
        :param inputs: list of input tensors [groups, question]
        :return: output tensor
        """
        input_groups = [inputs[0][:, i, :] for i in range(self.number_groups)]
        questions = inputs[1]
        outputs = []
        for index, group1 in enumerate(input_groups[:-1]):
            for group2 in input_groups[index + 1:]:
                # apply the shared MLP g_theta to the pair plus the question vector
                net = K.dot(K.concatenate([group1, group2, questions]), self.weightvariables[0][0])
                net = K.bias_add(net, self.weightvariables[0][1])
                for variables in self.weightvariables[1:]:
                    net = self.activation(net)
                    net = K.dot(net, variables[0])
                    net = K.bias_add(net, variables[1])
                outputs.append(sigmoid(net))

        flat_result = K.concatenate(outputs)
        return Reshape((self.combinations, self.number_features,))(flat_result)

    def compute_output_shape(self, input_shape):
        """
        Compute output shape.
        :param input_shape: Input shape of tensor
        :return: Output shape
        """
        # accept exactly 2 inputs
        assert len(input_shape) == 2
        # first input must be 3D: (batch, number_groups, group_len)
        assert len(input_shape[0]) == 3
        # second input must be 2D: (batch, question_len)
        assert len(input_shape[1]) == 2

        self.combinations = np.int32(math.factorial(self.number_groups) / (2 * math.factorial(self.number_groups - 2)))

        return (input_shape[0][0], self.combinations, self.number_features)

    def get_config(self):
        """
        Config required for saving parameters in keras model.
        """
        config = {
            'number_features': self.number_features,
            'hidden_feature_shape': self.hidden_feature_shape,
            'activation': activations.serialize(self.activation)
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))
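

# A minimal usage sketch for EnhancedRelations (shapes are illustrative
# assumptions). The layer takes two inputs: the 3D object groups and a 1D
# question vector q carrying event-level information that is appended to
# every pairwise comparison:
#
#     from tensorflow.keras.layers import Input, GlobalAveragePooling1D, Dense
#     from tensorflow.keras.models import Model
#
#     groups = Input(shape=(5, 4))      # 5 objects, 4 features each
#     question = Input(shape=(7,))      # event-level information q
#     rel = EnhancedRelations(number_features=10)([groups, question])
#     pooled = GlobalAveragePooling1D()(rel)
#     out = Dense(1, activation='sigmoid')(pooled)
#     model = Model(inputs=[groups, question], outputs=out)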