import torch
import torch.nn as nn
import 英法data_loader as data_loader

class Seq2SeqEncoder(nn.Module):
    """RNN encoder for sequence-to-sequence learning.

    Embeds integer token ids and feeds them through a multi-layer LSTM,
    returning per-step outputs and the final recurrent state.
    """

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0, **kwargs):
        super().__init__(**kwargs)
        # Learnable lookup table (trainable alternative to one-hot): maps a
        # (batch_size, num_steps) id tensor to (batch_size, num_steps, embed_size).
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.LSTM(embed_size, num_hiddens, num_layers, dropout=dropout)

    def forward(self, X, *args):
        # Embed, then put the time axis first: with batch_first=False the
        # LSTM expects input of shape (num_steps, batch_size, embed_size).
        embedded = self.embedding(X).permute(1, 0, 2)
        outputs, state = self.rnn(embedded)
        # outputs: (num_steps, batch_size, num_hiddens) — hidden state at every step.
        # state: (h_n, c_n), each of shape (num_layers, batch_size, num_hiddens),
        # i.e. the recurrent state after the final time step.
        return outputs, state


class Seq2SeqDecoder(nn.Module):
    """RNN decoder for sequence-to-sequence learning.

    At every decoding step, the encoder's last-layer final hidden state is
    concatenated to the token embedding as a fixed context vector.
    """

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0, **kwargs):
        super().__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Input at each step is [embedding ; context], hence num_hiddens + embed_size.
        self.rnn = nn.LSTM(num_hiddens + embed_size, num_hiddens, num_layers, dropout=dropout)
        # Projects each hidden state to a distribution over the target vocabulary.
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, enc_outputs, *args):
        # enc_outputs is the encoder's (output, state) pair; keep only the
        # LSTM state tuple (h_n, c_n) to initialize decoding.
        return enc_outputs[1]

    def forward(self, X, state):
        # (batch_size, num_steps) -> (num_steps, batch_size, embed_size);
        # time axis first, as required by the LSTM with batch_first=False.
        X = self.embedding(X).permute(1, 0, 2)
        # BUG FIX: an LSTM state is the tuple (h_n, c_n), so `state[-1]`
        # selected the whole cell state c_n (num_layers, batch, num_hiddens)
        # instead of the last-layer hidden state — repeat() then produced a
        # (num_steps*num_layers, ...) tensor that cannot be concatenated
        # with X for num_layers > 1. Take h_n's last layer, shape
        # (batch_size, num_hiddens), and tile it over all num_steps steps.
        context = state[0][-1].repeat(X.shape[0], 1, 1)
        X_and_context = torch.cat((X, context), 2)
        # BUG FIX: the original called nn.LSTM(X_and_context, state), which
        # tries to construct a brand-new (untrained) LSTM module every call
        # instead of running this decoder's trained self.rnn.
        output, state = self.rnn(X_and_context, state)
        # Project and restore batch-first layout:
        # output: (batch_size, num_steps, vocab_size)
        # state:  (h_n, c_n), each (num_layers, batch_size, num_hiddens)
        output = self.dense(output).permute(1, 0, 2)
        return output, state

    