    ):
        """
        Initialise the class.

        :param units: Number of units for the output dimension of GAT Convolutional layers
            as well as the dimension of global features.
        :param num_features: Number of features attached to each node or particle as NN input.
        :param num_pdg: Number of all possible PDG IDs.
        :param emb_size: Dimension of embedded PDG space.
        :param attention_heads: Number of attention heads for GAT Convolutional layers.
        :param n_layers: Number of GAT Convolutional layers.
        :param use_gap: Whether to use Global Attention Pooling (GAP) for the production of global features.
        """
        super().__init__()
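        # Embed each node's PDG ID into a dense emb_size-dimensional vector; the +1
        # leaves room for one extra index (presumably for padding or unknown IDs).
        # The first GAT layer sees the per-node features concatenated with this embedding.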
        self.pdg_embedding = torch.nn.Embedding(num_pdg + 1, emb_size)
        in_feats = num_features + emb_size
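        # Stack n_layers GAT layers. After each layer the node feature size becomes
        # units * attention_heads (presumably the concatenated multi-head outputs),
        # and the global feature size passed on via in_feats_glob becomes units.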
        self.gat_layers = torch.nn.ModuleList()
        in_feats_glob = 0
        for _ in range(n_layers):
            self.gat_layers.append(
                GATModule(
                    in_feats=in_feats,
                    units=units,
                    num_heads=attention_heads,
                    in_feats_glob=in_feats_glob,
                    use_gap=use_gap
                )
            )
            in_feats = units * attention_heads
            in_feats_glob = units
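        # Final fully connected layer: maps the units-dimensional (global) representation
        # to a single output value.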
        self.fc_output = torch.nn.Linear(units, 1)