import torch
import torch.nn as nn


class Embedding(nn.Module):
    """Frozen pretrained embeddings followed by spatial dropout."""

    def __init__(self, embedding_matrix):
        super(Embedding, self).__init__()
        # Width is EMBED_SIZE * 2: the matrix presumably concatenates two
        # pretrained embeddings per token. EMBED_SIZE is assumed to be
        # defined earlier in the script.
        self.embedding = nn.Embedding(embedding_matrix.shape[0], EMBED_SIZE * 2)
        self.embedding.weight = nn.Parameter(
            torch.tensor(embedding_matrix, dtype=torch.float32)
        )
        # Freeze the pretrained vectors; they are not fine-tuned.
        self.embedding.weight.requires_grad = False
        self.embedding_dropout = SpatialDropout(0.3)

    def forward(self, x):
        x = self.embedding(x)
        x = self.embedding_dropout(x)
        return x
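

# SpatialDropout is used above but not defined in this section. Below is a
# minimal sketch of the common PyTorch implementation (an assumption, not
# necessarily the author's exact code): it reuses nn.Dropout2d so that whole
# embedding channels are zeroed across an entire sequence, instead of
# dropping individual activations independently.
class SpatialDropout(nn.Dropout2d):
    def forward(self, x):
        # x: (batch, seq_len, embed_dim)
        x = x.unsqueeze(2)            # (batch, seq_len, 1, embed_dim)
        x = x.permute(0, 3, 2, 1)     # (batch, embed_dim, 1, seq_len)
        x = super(SpatialDropout, self).forward(x)  # drop entire channels
        x = x.permute(0, 3, 2, 1)     # (batch, seq_len, 1, embed_dim)
        x = x.squeeze(2)              # (batch, seq_len, embed_dim)
        return x


# Example: SpatialDropout(0.3)(torch.randn(4, 50, 600)) keeps the
# (4, 50, 600) shape but zeroes roughly 30% of the 600 channels per sample.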
class Encoder(nn.Module):
    """Sequence encoder built on stacked bidirectional LSTMs."""

    def __init__(self, num_sub_targets):
        super(Encoder, self).__init__()
        # Input width matches the embedding output (EMBED_SIZE * 2);
        # HIDDEN_SIZE is assumed to be defined earlier in the script.
        self.lstm_1 = nn.LSTM(
            EMBED_SIZE * 2, HIDDEN_SIZE, bidirectional=True, batch_first=True
        )
        # The second LSTM consumes the bidirectional output of the first,
        # hence the HIDDEN_SIZE * 2 input width.
        self.lstm_2 = nn.LSTM(
            HIDDEN_SIZE * 2, HIDDEN_SIZE, bidirectional=True, batch_first=True
        )