Public Attributes

layers = nn.ModuleList([EncoderBlock(**block_args) for _ in range(num_layers)])
norm = nn.LayerNorm(hidden_dim)
RelationalReasoning
A Transformer-style encoder stack: num_layers EncoderBlock layers applied in sequence, followed by a final nn.LayerNorm.
Definition at line 987 of file ieagan.py.
◆ __init__()
__init__(self, num_layers, hidden_dim, **block_args)
Constructor. Builds num_layers EncoderBlock instances (forwarding **block_args) and a final nn.LayerNorm over hidden_dim features.
Definition at line 991 of file ieagan.py.
991 def __init__(self, num_layers, hidden_dim, **block_args):
992 super().__init__()
993
994 self.layers = nn.ModuleList(
995 [EncoderBlock(**block_args) for _ in range(num_layers)]
996 )
997
998 self.norm = nn.LayerNorm(hidden_dim)
999
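A minimal construction sketch (not from ieagan.py): the import path, the keyword names in block_args (input_dim, num_heads, dim_feedforward, dropout), and all values are assumptions for illustration; the real **block_args are whatever the EncoderBlock defined in ieagan.py accepts.

# Assumes RelationalReasoning (and EncoderBlock) are importable from ieagan.py.
from ieagan import RelationalReasoning

# Hypothetical EncoderBlock kwargs -- substitute the real keyword names
# from the EncoderBlock definition in ieagan.py.
block_args = dict(
    input_dim=128,        # assumed: per-token feature dimension
    num_heads=4,          # assumed: number of attention heads
    dim_feedforward=256,  # assumed: feed-forward hidden size
    dropout=0.1,          # assumed: dropout probability
)

# hidden_dim must match the feature dimension the blocks produce,
# since the final nn.LayerNorm normalizes over it.
model = RelationalReasoning(num_layers=3, hidden_dim=128, **block_args)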
◆ forward()
forward(self, x)
Applies each encoder layer to x in sequence, then the final layer normalization.
Definition at line 1001 of file ieagan.py.
1001 def forward(self, x):
1002 for layer in self.layers:
1003 x = layer(x)
1004
1005 x = self.norm(x)
1006 return x
1007
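Continuing the sketch above, a hedged usage example for forward(); the (batch, tokens, hidden_dim) layout of x is an assumption, and the shape assertion holds only if EncoderBlock is shape-preserving, as is standard for transformer encoder blocks.

import torch

x = torch.randn(8, 40, 128)  # assumed layout: (batch, tokens, hidden_dim)
out = model(x)               # each EncoderBlock applied in turn, then LayerNorm
assert out.shape == x.shape  # holds if EncoderBlock is shape-preserving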
◆ get_attention_maps()
get_attention_maps(self, x)
Returns a list with one self-attention map per encoder layer, collected while propagating x through the stack.
Definition at line 1009 of file ieagan.py.
1009 def get_attention_maps(self, x):
1010 attention_maps = []
1011 for layer in self.layers:
1012 _, attn_map = layer.self_attn(x, return_attention=True)
1013 attention_maps.append(attn_map)
1014 x = layer(x)
1015 return attention_maps
1016
1017
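A sketch of inspecting the collected maps, continuing the example above; the (batch, num_heads, tokens, tokens) layout in the comment is an assumption, since the exact shape depends on layer.self_attn's implementation. Note from the code that self_attn runs once per layer to record the map and then again inside layer(x), so this method does roughly twice the attention work of forward().

with torch.no_grad():  # inspection only; no gradients needed
    maps = model.get_attention_maps(x)

print(len(maps))      # num_layers entries, one per EncoderBlock
print(maps[0].shape)  # e.g. (batch, num_heads, tokens, tokens); the exact
                      # layout depends on layer.self_attn's implementation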
◆ layers
Initial value:
layers = nn.ModuleList(
    [EncoderBlock(**block_args) for _ in range(num_layers)]
)
Definition at line 994 of file ieagan.py.
◆ norm
Initial value:
norm = nn.LayerNorm(hidden_dim)
Definition at line 998 of file ieagan.py.
The documentation for this class was generated from the following file:
- pxd/scripts/pxd/background_generator/models/ieagan.py