import torch
from torch import nn
import torch.functional as F
# from torch.nn.utils.rnn import pack_padded_sequence,pad_packed_sequence

class Encoder(nn.Module):
    """Bidirectional GRU encoder for a seq2seq model.

    Embeds source token indices, runs a single-layer bidirectional GRU,
    and projects the concatenation of the two directions' final hidden
    states down to the decoder's hidden size (the "bridge").
    """

    def __init__(self, in_dim, embed_dim, en_h_dim, de_h_dim, p=0.5) -> None:
        """
        Args:
            in_dim: source vocabulary size (number of distinct tokens).
            embed_dim: word-embedding dimension.
            en_h_dim: encoder GRU hidden size (per direction).
            de_h_dim: decoder hidden size, i.e. output size of the bridge.
            p: dropout probability applied to the embeddings.
        """
        super().__init__()
        self.in_dim = in_dim
        self.embed_dim = embed_dim
        self.en_h_dim = en_h_dim
        self.de_h_dim = de_h_dim

        self.embed = nn.Embedding(self.in_dim, self.embed_dim)

        # Bidirectional GRU: output is (src_len, B, 2*en_h_dim),
        # hidden is (2, B, en_h_dim) — one layer per direction.
        self.rnn = nn.GRU(input_size=self.embed_dim,
                          hidden_size=self.en_h_dim,
                          bidirectional=True)

        # Bridge: concatenated directional hidden states -> decoder hidden size.
        self.fc = nn.Linear(self.en_h_dim * 2, self.de_h_dim, bias=True)

        self.dropout = nn.Dropout(p)

    def forward(self, x):
        """Encode a batch of source sequences.

        Args:
            x: LongTensor of token indices, shape (src_len, batch).

        Returns:
            o: GRU outputs, shape (src_len, batch, 2*en_h_dim).
            h: bridged initial hidden state for the decoder,
               shape (1, batch, de_h_dim).
        """
        x = self.dropout(self.embed(x))
        o, h = self.rnn(x)

        # h[-1] and h[-2] are the final hidden states of the two directions;
        # concatenate them and project to the decoder hidden size.
        h = self.fc(torch.cat((h[-1, :, :], h[-2, :, :]), dim=-1))

        # (B, de_h_dim) -> (1, B, de_h_dim) so it can seed a 1-layer GRU.
        h = h.unsqueeze(0)

        return o, h


class Attention(nn.Module):
    """Attention bridge between the decoder state and the encoder outputs.

    Projects the concatenated decoder input and hidden state to a score
    vector of length ``att_dim`` — which must equal the encoder source
    length — then mixes the encoder outputs with those scores via bmm.

    NOTE(review): the scores are NOT passed through a softmax, so they are
    unnormalized mixing weights rather than a probability distribution —
    confirm this is intentional before relying on them as attention weights.
    """

    def __init__(self, de_h_dim, de_in_dim, att_dim) -> None:
        """
        Args:
            de_h_dim: decoder hidden size.
            de_in_dim: decoder input (embedding) dimension.
            att_dim: number of source positions; must match the first
                dimension of the encoder outputs passed to ``forward``.
        """
        super().__init__()
        self.de_h_dim = de_h_dim
        self.de_in_dim = de_in_dim
        self.att_dim = att_dim

        # Scores one weight per source position from [input ; hidden].
        self.fc = nn.Linear(self.de_h_dim + self.de_in_dim, self.att_dim)

    def forward(self, input, hidden, encoder_ouputs):
        """Compute an (unnormalized) attention context.

        Args:
            input: decoder input embedding, shape (1, B, de_in_dim).
            hidden: decoder hidden state, shape (1, B, de_h_dim).
            encoder_ouputs: encoder outputs, shape (att_dim, B, H).

        Returns:
            Context tensor of shape (1, B, H).
        """
        # (B, att_dim) scores, scaled by sqrt(de_in_dim) as in scaled dot-product.
        x = self.fc(torch.cat((input[0], hidden[0]), dim=-1)) / (self.de_in_dim ** 0.5)

        # (B, 1, att_dim) @ (B, att_dim, H) -> (B, 1, H)
        x = torch.bmm(x.unsqueeze(1), encoder_ouputs.permute(1, 0, 2))

        # -> (1, B, H), matching GRU input layout.
        x = x.permute(1, 0, 2)

        return x



class Decoder(nn.Module):
    """Single-step GRU decoder with an attention bridge over encoder outputs.

    Embeds the previous target token, attends over the encoder outputs,
    and feeds the resulting context (replacing the embedding itself)
    through a one-layer GRU followed by a linear classifier.
    """

    def __init__(self, out_dim, de_h_dim, max_length, output_dim, p=0.5) -> None:
        """
        Args:
            out_dim: target vocabulary size (embedding rows).
            de_h_dim: decoder GRU hidden size; also reused as the
                embedding dimension.
            max_length: attention score size. NOTE(review): despite the
                name, this must equal the ENCODER source length (see
                Attention), not a cap on the number of output tokens.
            output_dim: number of output classes of the final classifier.
            p: dropout probability applied to the embeddings.
        """
        super().__init__()
        self.out_dim = out_dim
        self.de_h_dim = de_h_dim
        # Embedding dim is tied to the hidden size.
        self.embed_dim = de_h_dim

        self.output_dim = output_dim

        self.embed = nn.Embedding(self.out_dim, self.embed_dim)

        self.att = Attention(self.de_h_dim, self.embed_dim, max_length)
        # NOTE(review): the GRU consumes the attention context, whose depth is
        # the encoder output depth — callers must make that equal de_h_dim.
        self.rnn = nn.GRU(input_size=self.embed_dim, hidden_size=self.de_h_dim)

        self.fc = nn.Linear(self.de_h_dim, output_dim)

        self.dropout = nn.Dropout(p)

    def forward(self, x, hidden, encoder_outputs):
        """Decode one time step.

        Args:
            x: previous target tokens, LongTensor of shape (batch,).
            hidden: previous decoder hidden state, (1, batch, de_h_dim).
            encoder_outputs: encoder outputs, (src_len, batch, de_h_dim).

        Returns:
            pre: class scores, shape (batch, output_dim).
            h: updated hidden state, shape (1, batch, de_h_dim).
        """
        # (batch,) -> (1, batch, embed_dim)
        x = self.dropout(self.embed(x)).unsqueeze(0)

        # Replace the embedded input with the attention context.
        x = self.att(x, hidden, encoder_outputs)

        o, h = self.rnn(x, hidden)

        # (1, batch, de_h_dim) -> (batch, output_dim)
        pre = torch.squeeze(self.fc(o), dim=0)

        return pre, h



class seq2seq(nn.Module):
    """Wires an encoder and a decoder; decodes with full teacher forcing."""

    def __init__(self, encoder, decoder) -> None:
        """
        Args:
            encoder: module mapping source tokens to (outputs, hidden).
            decoder: module mapping (token, hidden, encoder_outputs) to
                (scores, hidden).
        """
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, x, tar):
        """Run a full encode/decode pass.

        Args:
            x: source token indices, shape (src_len, batch).
            tar: target token indices, shape (tar_len, batch). The
                ground-truth token of each step is fed to the decoder
                (teacher forcing).

        Returns:
            Per-step class scores, shape (tar_len, batch, output_dim).
        """
        o, h = self.encoder(x)

        steps = []
        # Teacher forcing: always condition on the ground-truth target token.
        for i in range(tar.size(0)):
            pre, h = self.decoder(tar[i], h, o)
            steps.append(pre)

        # Stack instead of filling a pre-allocated buffer: keeps the result
        # on the decoder's device/dtype (the original always allocated a
        # CPU float32 tensor, which would break for models on other devices).
        return torch.stack(steps, dim=0)

if __name__ == "__main__":

    # Toy source batch: tensor is (4, 3) before .T, so after the transpose
    # the layout is (src_len=3, batch=4) — four sequences of length 3,
    # token ids drawn from a vocabulary of size 5.
    src = torch.tensor([[1, 4, 3], [2, 1, 4], [1, 2, 1], [3, 2, 1]]).T

    encoder = Encoder(5, 8, 32, 64)
    # Third argument (attention size) must equal the source length (3);
    # encoder output depth 2*32 matches the decoder hidden size 64.
    decoder = Decoder(10, 64, 3, 25)

    model = seq2seq(encoder, decoder)

    # Targets: (tar_len=2, batch=4) after the transpose; each sequence
    # begins with token 0 (the start-of-sequence marker).
    target = torch.tensor([[0, 1], [0, 3], [0, 2], [0, 1]]).T
    pre = model(src, target)

    print(pre.size())

    # CrossEntropyLoss wants (N, C, ...) — move the class axis to dim 1.
    criterion = nn.CrossEntropyLoss()
    loss = criterion(pre.permute(0, 2, 1), target)

    loss.backward()

