import torch.nn as nn
import torch
import math
class ELMo(nn.Module):
    """Embedding layer followed by a bidirectional GRU, producing
    context-sensitive ("ELMo-style") token representations.

    Output per token is the concatenation of forward and backward GRU
    states, so the last dimension is 2 * hidden_size.

    NOTE(review): `output_size` is accepted but never used here; kept
    only so the constructor signature matches the sibling models.
    """

    def __init__(self, embed_size, hidden_size, output_size, vocab_size):
        super(ELMo, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.gru = nn.GRU(embed_size, hidden_size,
                          batch_first=True, bidirectional=True)

    def forward(self, input_ids):
        """Map token ids (batch, seq_len) to contextual embeddings
        of shape (batch, seq_len, 2 * hidden_size)."""
        token_vectors = self.embedding(input_ids)
        contextual, _ = self.gru(token_vectors)
        return contextual
    
class selfAttention(nn.Module):
    """Single-head scaled dot-product self-attention.

    Projects the input into query/key/value spaces, scores every
    position against every other, and returns the attention-weighted
    mix of the values.
    """

    def __init__(self, input_size, hidden_size):
        super(selfAttention, self).__init__()
        self.q_w = nn.Linear(input_size, hidden_size)
        self.k_w = nn.Linear(input_size, hidden_size)
        self.v_w = nn.Linear(input_size, hidden_size)

    def forward(self, x):
        """x: (batch, seq_len, input_size) -> (batch, seq_len, hidden_size)."""
        queries = self.q_w(x)
        keys = self.k_w(x)
        values = self.v_w(x)
        # Scale by sqrt(d_k) to keep the softmax in a well-behaved range.
        scores = queries @ keys.transpose(1, 2) / math.sqrt(keys.size(-1))
        weights = torch.softmax(scores, dim=-1)
        return weights @ values
    

class MyNeuralNetwork(nn.Module):
    """Text classifier: embedding -> GRU -> self-attention -> sum-pool
    -> linear head.

    `forward` returns a ``{'loss', 'logits'}`` dict so the model plugs
    directly into an HF-Trainer-style training loop; ``loss`` is None
    when no labels are supplied.
    """

    def __init__(self, embed_size, hidden_size, output_size, vocab_size):
        super(MyNeuralNetwork, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.gru = nn.GRU(embed_size, hidden_size, batch_first=True)
        self.self_att = selfAttention(hidden_size, hidden_size)
        self.classifier = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()  # NOTE(review): registered but never used in forward
        self.loss_fn = nn.CrossEntropyLoss()
        self.dropout = nn.Dropout(0.1)

    def forward(self, input_ids, labels=None):
        """input_ids: (batch, seq_len) token ids; labels: optional
        (batch,) class indices for the cross-entropy loss."""
        hidden = self.embedding(input_ids)            # (B, T, E)
        hidden, _ = self.gru(hidden)                  # (B, T, H)
        attended = self.self_att(hidden)              # (B, T, H)
        # Sum-pool over the time axis, then regularize with dropout.
        pooled = self.dropout(attended.sum(dim=1))    # (B, H)
        logits = self.classifier(pooled)              # (B, output_size)
        loss = self.loss_fn(logits, labels) if labels is not None else None
        return {'loss': loss, 'logits': logits}
    
# Baseline model (no attention): 74.73% accuracy
# With self-attention added:     75.34% accuracy