import torch
import torch.nn as nn
import torch.nn.functional as F

# Run on the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")


class Encoder(nn.Module):
    """Embed a batch of source-token indices and run them through a GRU.

    forward input:
        en_text: [batch, sentence_len] LongTensor of token indices
    forward output:
        outputs: [batch, sentence_len, hidden_size] per-step GRU outputs
        hidden:  [num_layers, batch, hidden_size] final hidden state
    """

    def __init__(self, len_vocab, embedding_size, hidden_size, num_layers):
        super().__init__()
        self.embedding = nn.Embedding(len_vocab, embedding_size)
        self.gru = nn.GRU(
            embedding_size,
            hidden_size,
            num_layers=num_layers,
            batch_first=True,
        )

    def forward(self, en_text):
        # [batch, sentence_len] -> [batch, sentence_len, embedding_size],
        # then the GRU returns (outputs, final_hidden) as a tuple.
        return self.gru(self.embedding(en_text))


class Decoder(nn.Module):
    """GRU decoder that greedily generates a fixed-length sequence token by token,
    starting from the encoder's final hidden state and an <SOS> input."""

    def __init__(self, len_vocab, embedding_size, hidden_size, num_layers, batch_size, output_len):
        super().__init__()
        # batch_size is kept for backward compatibility; forward() now derives the
        # actual batch size from its input so a smaller final batch also works.
        self.batch_size = batch_size
        self.output_len = output_len
        self.len_vocab = len_vocab
        self.embedding_size = embedding_size
        self.embedding = nn.Embedding(len_vocab, embedding_size)
        self.gru = nn.GRU(embedding_size, hidden_size, num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, len_vocab)

    def forward(self, encoder_hidden, vocab_ch):
        """
        :param encoder_hidden: [num_layers, batch, hidden_size] final encoder hidden state
        :param vocab_ch: target vocabulary; any sequence supporting vocab_ch.index("<SOS>")
        :return: (decoder_final_output [batch, output_len, len_vocab] — per-step softmax
                  distributions over the vocabulary,
                  decoder_hidden [num_layers, batch, hidden_size])
        """
        # Derive the batch size from the input instead of the constructor argument,
        # so the last (possibly smaller) batch of an epoch is handled correctly.
        batch_size = encoder_hidden.size(1)
        decoder_hidden = encoder_hidden

        # First input: one <SOS> token per batch element, shape [batch, 1],
        # created on the same device as the incoming hidden state.
        sos_index = vocab_ch.index("<SOS>")
        decoder_input_t = torch.full(
            (batch_size, 1), sos_index, dtype=torch.long, device=encoder_hidden.device
        )

        # new_zeros keeps dtype/device consistent with the encoder state.
        decoder_final_output = encoder_hidden.new_zeros(batch_size, self.output_len, self.len_vocab)
        for t in range(self.output_len):
            decoder_output_t, decoder_hidden = self.forward_step(decoder_input_t, decoder_hidden)
            # decoder_output_t: [batch, len_vocab] — a probability distribution per row
            # (the old comment claiming [batch, 1] was wrong).
            decoder_final_output[:, t, :] = decoder_output_t

            # Greedy decoding: feed the most probable token back in as the next input.
            decoder_input_t = torch.argmax(decoder_output_t, dim=1, keepdim=True)  # [batch, 1]
        return decoder_final_output, decoder_hidden

    # Runs the GRU for a single time step (one token per batch element).
    def forward_step(self, input, hidden):
        """
        :param input: [batch, 1] token indices
        :param hidden: [num_layers, batch, hidden_size]
        :return: (output [batch, len_vocab] softmax probabilities,
                  hidden [num_layers, batch, hidden_size])
        """
        embeded = self.embedding(input)
        # embeded: [batch, 1, embedding_size]

        output, hidden = self.gru(embeded, hidden)
        # output: [batch, 1, hidden_size]

        output = self.fc(output)
        # output: [batch, 1, len_vocab]

        # NOTE(review): softmax is applied here, so any training loss on this output
        # must be NLL on log-probabilities — NOT CrossEntropyLoss, which applies
        # softmax internally and would double-apply it. Verify against the trainer.
        output = F.softmax(output, dim=2)
        output = output.squeeze(1)  # drop the time dimension -> [batch, len_vocab]
        return output, hidden


class Seq2Seq(nn.Module):
    """Wire the Encoder and Decoder together: encode the source batch, then
    decode a fixed-length target sequence from the encoder's final hidden state."""

    def __init__(self, len_vocab1, len_vocab2, embedding_size, hidden_size, num_layers, batch_size, output_len,
                 vocab_ch):
        super().__init__()
        # len_vocab1: source-side vocabulary size; len_vocab2: target-side vocabulary size.
        self.encoder = Encoder(len_vocab1, embedding_size, hidden_size, num_layers)
        self.decoder = Decoder(len_vocab2, embedding_size, hidden_size, num_layers, batch_size, output_len)
        self.vocab_ch = vocab_ch

    def forward(self, input):
        # input: [batch, sentence_len] source token indices.
        # Only the encoder's final hidden state is passed on; the per-step
        # encoder outputs are discarded (no attention in this model).
        _, final_hidden = self.encoder(input)
        return self.decoder(final_hidden, self.vocab_ch)
