import torch.nn as nn
from torch.autograd import Variable

# A three-layer perceptron used both to process word embeddings and to decode predictions.
class MLP(nn.Module):
    """Three-layer perceptron: two ReLU hidden layers followed by a linear output.

    Applies to the last dimension of the input, so it accepts any leading
    batch/sequence dimensions.
    """

    def __init__(self, input_size, hidden_size1, hidden_size2, output_size):
        super().__init__()
        # Attribute names (fc1/relu1/...) are kept identical to the original
        # so that state_dict keys — and hence saved checkpoints — stay compatible.
        self.fc1 = nn.Linear(input_size, hidden_size1)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size1, hidden_size2)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(hidden_size2, output_size)

    def forward(self, x):
        """Return fc3(relu(fc2(relu(fc1(x))))) — no activation on the output."""
        hidden = self.relu1(self.fc1(x))
        hidden = self.relu2(self.fc2(hidden))
        return self.fc3(hidden)

class WordEmb(nn.Module):
    """Token-embedding front-end: look up embeddings, then project them with an
    MLP down/up to the recurrent layer's input size (``lstm_input``)."""

    def __init__(self, vocab_size, embedding_dim, input_hidden1, input_hidden2, lstm_input) -> None:
        super().__init__()
        self.emb = nn.Embedding(vocab_size, embedding_dim)
        self.mlp = MLP(input_size=embedding_dim,
                       hidden_size1=input_hidden1,
                       hidden_size2=input_hidden2,
                       output_size=lstm_input)
        # NOTE(review): emb2 is never used by forward(); it is kept registered
        # (in the original construction order) so parameter init and existing
        # checkpoints remain compatible.
        self.emb2 = nn.Embedding(vocab_size, lstm_input)

    def forward(self, x):
        """Map integer token ids of shape (batch, seq) to float features of
        shape (batch, seq, lstm_input)."""
        return self.mlp(self.emb(x))
    
class RNN(nn.Module):
    """Recurrent language model: WordEmb front-end, an LSTM (or GRU when
    ``if_lstm=False``) core, and an MLP decoder producing vocabulary logits.

    The external interface (constructor parameters and forward signature)
    is unchanged from the original.
    """

    def __init__(self, vocab_size, embedding_dim, input_hidden1, input_hidden2,
                 lstm_input, lstm_layers, lstm_output, output_hidden1,
                 output_hidden2, if_lstm=True) -> None:
        super().__init__()
        self.wordemb = WordEmb(vocab_size=vocab_size, embedding_dim=embedding_dim,
                               input_hidden1=input_hidden1, input_hidden2=input_hidden2,
                               lstm_input=lstm_input)
        self.lstm = nn.LSTM(lstm_input, lstm_output, num_layers=lstm_layers, batch_first=True)
        self.gru = nn.GRU(lstm_input, lstm_output, num_layers=lstm_layers, batch_first=True)
        self.mlp = MLP(input_size=lstm_output, hidden_size1=output_hidden1,
                       hidden_size2=output_hidden2, output_size=vocab_size)
        # NOTE(review): self.line is unused by forward(); kept registered so
        # existing checkpoints still load.
        self.line = nn.Linear(in_features=lstm_output, out_features=vocab_size)
        self.hidden_dim = lstm_output
        # Bug fix: remember the layer count so the zero initial state below can
        # match it (it was previously hard-coded to 2, which crashes for any
        # lstm_layers != 2).
        self.num_layers = lstm_layers
        self.if_lstm = if_lstm

    def forward(self, input, hidden=None):
        """Run one forward pass.

        Args:
            input: LongTensor of token ids, shape (batch_size, seq_len).
            hidden: optional initial state — an (h_0, c_0) tuple for the LSTM
                branch, or a single h_0 tensor for the GRU branch. When None,
                zero states of shape (num_layers, batch_size, hidden_dim) are
                created on input's device.

        Returns:
            (logits, hidden): logits of shape (seq_len * batch_size, vocab_size)
            and the final recurrent state in the same format as ``hidden``.
        """
        batch_size, seq_len = input.size()
        if hidden is None:
            # new_zeros inherits input's device; .float() is needed because
            # input is an integer id tensor. (Replaces the deprecated
            # Variable / .data.new(...).fill_(0) idiom.)
            h_0 = input.new_zeros(self.num_layers, batch_size, self.hidden_dim).float()
            c_0 = input.new_zeros(self.num_layers, batch_size, self.hidden_dim).float()
        elif self.if_lstm:
            h_0, c_0 = hidden
        else:
            h_0 = hidden
        embeds = self.wordemb(input)  # (batch_size, seq_len, lstm_input)
        if self.if_lstm:
            output, hidden = self.lstm(embeds, (h_0, c_0))  # (batch_size, seq_len, hidden_dim)
        else:
            output, hidden = self.gru(embeds, h_0)          # (batch_size, seq_len, hidden_dim)
        # Flatten time and batch so the decoder MLP maps each position's
        # hidden vector to vocabulary logits.
        output = self.mlp(output.reshape(seq_len * batch_size, -1))
        return output, hidden