import torch
import torch.nn as nn
from kgdlg.modules.Attention import GlobalAttention,KeywordAttention
# from nmt.modules.SRU import SRU
from kgdlg.modules.StackedRNN import StackedGRU, StackedLSTM
import torch.nn.functional as F
from torch.autograd import Variable
import math
import random     


            


class AttnDecoder(nn.Module):
    """RNN decoder with optional GlobalAttention over an encoder context.

    With ``attn_type == 'none'`` the decoder is a plain RNN followed by
    dropout; otherwise the RNN outputs are attended against ``context``
    via ``GlobalAttention``.
    """
    def __init__(self, rnn_type, embedding, attn_type, input_size,
                hidden_size, num_layers=1, dropout=0.1):
        super(AttnDecoder, self).__init__()
        # Basic attributes.
        self.rnn_type = rnn_type        # name of an nn.* RNN class, e.g. 'GRU'/'LSTM'
        self.attn_type = attn_type
        self.embedding = embedding
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.dropout = nn.Dropout(dropout)

        # Build the recurrent core by name lookup on torch.nn.
        self.rnn = getattr(nn, rnn_type)(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout)

        if attn_type != 'none':
            self.attn = GlobalAttention(hidden_size, attn_type)

    def forward(self, input, context, state):
        """Run one decoding pass.

        input:   target token indices — assumes (tgt_len, batch), TODO confirm
        context: encoder states — assumes (src_len, batch, d), TODO confirm
        state:   initial hidden state for the RNN
        Returns ``(outputs, hidden, attn)``; ``attn`` is None when
        attention is disabled.
        """
        embedded = self.embedding(input)
        rnn_out, hidden = self.rnn(embedded, state)

        if self.attn_type == 'none':
            return self.dropout(rnn_out), hidden, None

        # transpose(0, 1) makes both tensors batch-first for the attention module.
        attended, attn_scores = self.attn(
            rnn_out.transpose(0, 1).contiguous(),
            context.transpose(0, 1),
        )
        # NOTE(review): the third return value is the attended output, not
        # attn_scores — possibly the scores were intended; preserved as-is.
        return self.dropout(attended), hidden, attended

class KeyAttnDecoder(nn.Module):
    """RNN decoder with optional KeywordAttention conditioned on a latent code.

    Like ``AttnDecoder``, but the attention module additionally receives
    ``latent_z`` (a latent code expanded to one vector per target step).

    Fixes vs. previous revision: removed leftover debug ``print`` calls
    (one fired on every forward pass in the no-attention branch) and
    commented-out dead code; comments translated to English.
    """
    def __init__(self, rnn_type, embedding, attn_type, input_size,
                hidden_size, num_layers=1, dropout=0.1):
        super(KeyAttnDecoder, self).__init__()
        # Basic attributes.
        self.rnn_type = rnn_type        # name of an nn.* RNN class, e.g. 'GRU'/'LSTM'
        self.attn_type = attn_type
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.dropout = nn.Dropout(dropout)
        self.embedding = embedding
        self.rnn = getattr(nn, rnn_type)(
                input_size=input_size,
                hidden_size=hidden_size,
                num_layers=num_layers,
                dropout=dropout)

        if self.attn_type != 'none':
            self.attn = KeywordAttention(hidden_size)

    def forward(self, input, context, state, latent_z):
        """Run one decoding pass with a latent code.

        input:    target token indices — assumes (tgt_len, batch), TODO confirm
        context:  encoder states — assumes (src_len, batch, d), TODO confirm
        state:    initial hidden state for the RNN
        latent_z: latent code broadcastable to the RNN output shape
        Returns ``(outputs, hidden, attn)``; ``attn`` is None when
        attention is disabled.
        """
        emb = self.embedding(input)
        # Run the target tokens through the RNN first.
        rnn_outputs, hidden = self.rnn(emb, state)

        # Broadcast the latent code to one vector per target time step.
        latent_z = latent_z.expand_as(rnn_outputs)

        if self.attn_type != 'none':
            # transpose(0, 1) makes all three tensors batch-first for
            # KeywordAttention.
            attn_outputs, attn_scores = self.attn(
                rnn_outputs.transpose(0, 1).contiguous(),
                context.transpose(0, 1),
                latent_z.transpose(0, 1),
            )
            outputs = self.dropout(attn_outputs)
            # NOTE(review): returns the attended output, not attn_scores —
            # possibly the scores were intended; preserved as-is.
            attn = attn_outputs
        else:
            outputs = self.dropout(rnn_outputs)
            attn = None

        return outputs, hidden, attn
