
import torch
import torch.nn as nn
import random

# Reproducibility: pin every RNG this script touches to a fixed seed.
SEED = 1
random.seed(SEED)  # Python's built-in RNG
torch.manual_seed(SEED)  # PyTorch RNG (CPU; also seeds CUDA per torch docs)

# Ask cuDNN for deterministic kernels (can be slower, but repeatable).
torch.backends.cudnn.deterministic = True


class LSTMEncoder(nn.Module):
    """Embed token ids and run them through an LSTM, keeping only the final states.

    Args:
        vocab_size: size of the source vocabulary.
        emd_dim: embedding dimension.
        hid_size: LSTM hidden size.
        n_layers: number of stacked LSTM layers.
    """

    def __init__(self, vocab_size, emd_dim=64, hid_size=128, n_layers=1):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emd_dim)
        self.rnn = nn.LSTM(emd_dim, hid_size, num_layers=n_layers, batch_first=True)

    def forward(self, x):
        """Encode a batch of token-id sequences.

        Args:
            x: LongTensor of shape [batch_size, seq_len].

        Returns:
            Tuple (hidden, cell), each of shape
            [n_layers, batch_size, hid_size] — the LSTM's final states.
        """
        embedded = self.embedding(x)  # [batch_size, seq_len, emd_dim]
        # Per-step outputs are discarded; only the final (hidden, cell) matter.
        _, (hidden, cell) = self.rnn(embedded)
        return hidden, cell


class LSTMDecoder(nn.Module):
    """Single-step LSTM decoder: one input token in, one set of logits out.

    Args:
        vocab_size: size of the target vocabulary.
        emd_dim: embedding dimension.
        hid_size: LSTM hidden size (must match the encoder's).
        n_layers: number of stacked LSTM layers (must match the encoder's).
    """

    def __init__(self, vocab_size, emd_dim=64, hid_size=128, n_layers=1):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emd_dim)
        self.rnn = nn.LSTM(emd_dim, hid_size, num_layers=n_layers, batch_first=True)
        self.out = nn.Linear(hid_size, vocab_size)

    def forward(self, x, hidden, cell):
        """Decode exactly one time step.

        Args:
            x: LongTensor [batch_size, 1] — one token id per sequence.
            hidden: [n_layers, batch_size, hid_size] previous hidden state.
            cell:   [n_layers, batch_size, hid_size] previous cell state.

        Returns:
            logits: [batch_size, 1, vocab_size] unnormalized scores.
            hidden, cell: updated states, same shapes as the inputs.
        """
        embedded = self.embedding(x)  # [batch_size, 1, emd_dim]
        step_out, (hidden, cell) = self.rnn(embedded, (hidden, cell))
        logits = self.out(step_out)  # [batch_size, 1, vocab_size]
        return logits, hidden, cell


class Seq2Seq(nn.Module):
    """Encoder-decoder seq2seq model (LSTM-based), trained with teacher forcing.

    Args:
        encode_vocab_size: source vocabulary size.
        decoder_vocab_size: target vocabulary size.
        emd_dim: embedding dimension shared by encoder and decoder.
        hid_size: LSTM hidden size shared by encoder and decoder.
        n_layers: number of LSTM layers in both encoder and decoder.
    """
    def __init__(self,
                 encode_vocab_size,
                 decoder_vocab_size,
                 emd_dim=64,
                 hid_size=128,
                 n_layers=1):
        super().__init__()
        self.encoder = LSTMEncoder(vocab_size=encode_vocab_size,
                                   emd_dim=emd_dim,
                                   hid_size=hid_size,
                                   n_layers=n_layers)
        self.decoder = LSTMDecoder(vocab_size=decoder_vocab_size,
                                   emd_dim=emd_dim,
                                   hid_size=hid_size,
                                   n_layers=n_layers)

    def forward(self, src, trg):
        """Teacher-forced forward pass for training.

        Args:
            src: LongTensor [batch_size, src_len] of source token ids.
            trg: LongTensor [batch_size, trg_len]; the ground-truth token at
                each step is fed to the decoder (teacher forcing).

        Returns:
            Logits of shape [batch_size, trg_len, decoder_vocab_size].
        """
        # hidden/cell: [n_layers, batch_size, hid_size] each
        hidden, cell = self.encoder(src)
        t_outputs = []
        for t in range(trg.shape[1]):
            x = trg[:, t].unsqueeze(-1)  # [batch_size, 1]
            outputs, hidden, cell = self.decoder(x, hidden, cell)
            t_outputs.append(outputs)  # each [batch_size, 1, vocab_size]
        return torch.cat(t_outputs, dim=1)

    def inference(self, src, start_index=2):
        """Greedy decoding: feed each step's argmax token back in as input.

        Args:
            src: LongTensor [batch_size, src_len] of source token ids.
            start_index: id of the start-of-sequence token (default 2 —
                presumably matches the tokenizer's SOS id; TODO confirm).

        Returns:
            LongTensor [batch_size, src_len] of predicted token ids; the
            output length deliberately mirrors the source length.
        """
        hidden, cell = self.encoder(src)
        # Bug fix: allocate the start tokens with the right dtype AND on the
        # same device as `src`. The old code (`torch.full(...).to(torch.long)`)
        # always produced a CPU tensor and crashed for CUDA inputs.
        x = torch.full((src.shape[0], 1), start_index,
                       dtype=torch.long, device=src.device)
        t_outputs = []
        for t in range(src.shape[1]):
            outputs, hidden, cell = self.decoder(x, hidden, cell)
            x = torch.argmax(outputs, dim=-1)  # greedy pick: [batch_size, 1]
            t_outputs.append(x)
        return torch.cat(t_outputs, dim=1)
