import torch
from torch import nn
from d2l import torch as d2l
import Attention as at

class Seq2SeqAttentionDecoder(d2l.AttentionDecoder):
    """GRU decoder with Bahdanau (additive) attention over encoder outputs.

    At every decoding step the last layer's previous hidden state queries the
    encoder outputs; the resulting context vector is concatenated with the
    current input embedding and fed to the GRU.
    """

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 dropout=0, **kwargs):
        super(Seq2SeqAttentionDecoder, self).__init__(**kwargs)
        # Additive attention: queries and keys both have num_hiddens features.
        self.attention = at.AdditiveAttention(num_hiddens, num_hiddens,
                                              num_hiddens, dropout)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # GRU input is the embedding concatenated with the context vector,
        # hence input size embed_size + num_hiddens.
        self.rnn = nn.GRU(embed_size + num_hiddens, num_hiddens, num_layers,
                          dropout=dropout)
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, enc_output, enc_valid_lens, *args):
        """Build the decoder state from the encoder's output.

        Args:
            enc_output: tuple of (outputs, hidden_state) where outputs is
                (num_steps, batch_size, num_hiddens).
            enc_valid_lens: valid lengths of the encoder inputs, used to mask
                attention over padding.

        Returns:
            (enc_outputs permuted to batch-first, hidden_state, enc_valid_lens).
        """
        outputs, hidden_state = enc_output
        # Attention expects batch-first keys/values: (batch, num_steps, num_hiddens).
        return (outputs.permute(1, 0, 2), hidden_state, enc_valid_lens)

    def forward(self, X, state):
        """Decode X step by step, attending to the encoder outputs each step.

        Args:
            X: (batch_size, num_steps) token indices.
            state: (enc_outputs, hidden_state, enc_valid_lens) from init_state.

        Returns:
            (batch_size, num_steps, vocab_size) logits and the updated state.
        """
        enc_outputs, hidden_state, enc_valid_lens = state
        # (batch, steps) -> (steps, batch, embed_size) so we can loop over steps.
        X = self.embedding(X).permute(1, 0, 2)
        # FIX: initialize `self._attention_weights` (was `self.attention_weights`,
        # which left the list appended below uninitialized).
        outputs, self._attention_weights = [], []
        for x in X:
            # Query is the top layer's previous hidden state: (batch, 1, num_hiddens).
            query = torch.unsqueeze(hidden_state[-1], dim=1)
            # Context: (batch, 1, num_hiddens), masked by enc_valid_lens.
            context = self.attention(query, enc_outputs, enc_outputs,
                                     enc_valid_lens)
            # FIX: concatenate on the feature axis (was dim=1, the sequence
            # axis, which crashes unless embed_size == num_hiddens and is
            # wrong either way). Result: (batch, 1, embed_size + num_hiddens).
            x = torch.cat((context, torch.unsqueeze(x, dim=1)), dim=-1)
            # GRU wants time-first input: (1, batch, embed_size + num_hiddens).
            output, hidden_state = self.rnn(x.permute(1, 0, 2), hidden_state)
            outputs.append(output)
            self._attention_weights.append(self.attention.attention_weights)
        # Stack the per-step outputs along time, project to vocabulary logits.
        outputs = self.dense(torch.cat(outputs, dim=0))
        return outputs.permute(1, 0, 2), [enc_outputs, hidden_state,
                                          enc_valid_lens]

    @property
    def attention_weights(self):
        # FIX: implement the accessor declared by d2l.AttentionDecoder so the
        # per-step attention weights recorded in forward() can be read.
        return self._attention_weights