import torch
from torch import nn
from torch.nn import functional as F

from NMTDemo.NMTModel.Attention import verbose_attention


class NMTDecoder(nn.Module):
    """GRU-based decoder with attention for sequence-to-sequence NMT.

    At each time step the decoder embeds the (teacher-forced) gold target
    token, concatenates it with the previous attention context, advances a
    GRUCell, attends over the encoder states with the new hidden state, and
    scores the next token from ``[context ; hidden]``.
    """

    def __init__(self, num_embeddings, embedding_size, rnn_hidden_size, bos_index):
        """
        Args:
            num_embeddings (int): size of the target vocabulary.
            embedding_size (int): dimensionality of target-token embeddings.
            rnn_hidden_size (int): GRU hidden-state size.
            bos_index (int): vocabulary index of the begin-of-sequence token.
        """
        super(NMTDecoder, self).__init__()
        self._rnn_hidden_size = rnn_hidden_size
        # padding_idx=0: row 0 of the embedding stays zero for <pad> tokens
        self.target_embedding = nn.Embedding(num_embeddings=num_embeddings,
                                             embedding_dim=embedding_size,
                                             padding_idx=0)
        # GRU input is [token embedding ; attention context vector]
        self.gru_cell = nn.GRUCell(embedding_size + rnn_hidden_size,
                                   rnn_hidden_size)
        # projects the encoder's final hidden state into the decoder's space
        self.hidden_map = nn.Linear(rnn_hidden_size, rnn_hidden_size)
        # vocabulary scores computed from [context ; hidden] (hence *2)
        self.classifier = nn.Linear(rnn_hidden_size * 2, num_embeddings)
        self.bos_index = bos_index

    def _init_indices(self, batch_size):
        """Return a (batch_size,) int64 tensor filled with the BOS index."""
        return torch.full((batch_size,), self.bos_index, dtype=torch.int64)

    def _init_context_vectors(self, batch_size):
        """Return an all-zero (batch_size, rnn_hidden_size) initial context."""
        return torch.zeros(batch_size, self._rnn_hidden_size)

    def forward(self, encoder_state, initial_hidden_state, target_sequence):
        """Run one teacher-forced decoding pass over a batch.

        Args:
            encoder_state: per-step encoder outputs; assumed
                (batch, src_len, hidden) — consumed by ``verbose_attention``,
                confirm against the encoder's output shape.
            initial_hidden_state: encoder's final hidden state,
                (batch, rnn_hidden_size).
            target_sequence: gold target token indices, (batch, tgt_len).

        Returns:
            Unnormalized vocabulary scores of shape
            (batch, tgt_len, num_embeddings).
        """
        # iterate over time steps: (batch, tgt_len) -> (tgt_len, batch)
        target_sequence = target_sequence.permute(1, 0)
        output_sequence_size = target_sequence.size(0)
        # seed the decoder hidden state from the encoder's final hidden state
        h_t = self.hidden_map(initial_hidden_state)

        batch_size = encoder_state.size(0)
        # no attention has happened yet, so the first context is all zeros
        context_vectors = self._init_context_vectors(batch_size)
        y_t_index = self._init_indices(batch_size)

        h_t = h_t.to(encoder_state.device)
        y_t_index = y_t_index.to(encoder_state.device)
        context_vectors = context_vectors.to(encoder_state.device)

        output_vectors = []
        # caches for offline attention visualization (detached, not in graph)
        self._cached_p_attn = []
        self._cached_ht = []
        self._cached_decoder_state = encoder_state.cpu().detach().numpy()

        for i in range(output_sequence_size):
            # teacher forcing: feed the gold token, not the model's prediction
            y_t_index = target_sequence[i]
            # 1. embed the current target token
            y_input_vector = self.target_embedding(y_t_index)
            # 2. advance the GRU on [embedding ; previous context]
            rnn_input = torch.cat([y_input_vector, context_vectors], dim=1)
            h_t = self.gru_cell(rnn_input, h_t)
            self._cached_ht.append(h_t.cpu().detach().numpy())
            # 3. attend over the encoder states with the new hidden state
            context_vectors, p_attn, _ = verbose_attention(
                encoder_state_vectors=encoder_state, query_vector=h_t)
            # cache attention weights for visualization
            self._cached_p_attn.append(p_attn.cpu().detach().numpy())
            # 4. score the next token from [context ; hidden]
            prediction_vector = torch.cat((context_vectors, h_t), dim=1)
            # BUG FIX: F.dropout defaults to training=True, which kept dropout
            # active during evaluation; gate it on self.training instead.
            score_for_y_t_index = self.classifier(
                F.dropout(prediction_vector, p=0.3, training=self.training))
            # collect the per-step prediction scores
            output_vectors.append(score_for_y_t_index)

        # (tgt_len, batch, vocab) -> (batch, tgt_len, vocab)
        return torch.stack(output_vectors).permute(1, 0, 2)



