"""
实现attention
"""
import torch.nn as nn
import torch.nn.functional as F
import config
import torch


class Attention(nn.Module):
    """Luong-style attention supporting "dot", "general" and "concat" scoring.

    Scores the last-layer decoder hidden state against every encoder output
    and returns softmax-normalized attention weights over the source sequence.
    """

    def __init__(self, method="general"):
        super(Attention, self).__init__()
        assert method in ["dot", "general", "concat"], "method error"
        self.method = method

        if self.method == "general":
            # Projects encoder outputs into the decoder hidden space before the dot product.
            self.Wa = nn.Linear(config.chatbot_encoder_hidden_size, config.chatbot_decoder_hidden_size, bias=False)

        elif self.method == "concat":
            # Scores [decoder_hidden; encoder_output] pairs through a tanh MLP.
            self.Wa = nn.Linear(config.chatbot_encoder_hidden_size + config.chatbot_decoder_hidden_size,
                                config.chatbot_decoder_hidden_size, bias=False)
            self.Va = nn.Linear(config.chatbot_decoder_hidden_size, 1)

    def forward(self, hidden_state, encoder_outputs):
        """
        :param hidden_state: [num_layer, batch_size, decoder_hidden_size]
        :param encoder_outputs: [batch_size, seq_len, encoder_hidden_size]
        :return: attention weights [batch_size, seq_len], rows sum to 1
        """
        # 1. dot: score = encoder_output . hidden
        if self.method == "dot":
            # [batch_size, decoder_hidden_size, 1]
            hidden_state = hidden_state[-1, :, :].unsqueeze(-1)
            # [batch_size, seq_len, hidden] x [batch_size, hidden, 1] -> [batch_size, seq_len]
            attention_weight = encoder_outputs.bmm(hidden_state).squeeze(-1)
            attention_weight = F.softmax(attention_weight, dim=-1)
        # 2. general: score = (Wa . encoder_output) . hidden
        elif self.method == "general":
            encoder_outputs = self.Wa(encoder_outputs)
            hidden_state = hidden_state[-1, :, :].unsqueeze(-1)
            attention_weight = encoder_outputs.bmm(hidden_state).squeeze(-1)
            attention_weight = F.softmax(attention_weight, dim=-1)
        # 3. concat: score = Va . tanh(Wa . [hidden; encoder_output])
        elif self.method == "concat":
            batch_size = encoder_outputs.size(0)
            # fix: was `encoder_outputs(1)` (calling the tensor) in the original
            encoder_seq_len = encoder_outputs.size(1)
            # Tile the last-layer hidden state across the source sequence:
            # [batch_size, decoder_hidden] -> [batch_size, seq_len, decoder_hidden]
            # fix: original discarded the repeat() result, so cat() got mismatched shapes
            hidden_state = hidden_state[-1, :, :].unsqueeze(1).repeat(1, encoder_seq_len, 1)
            # [batch_size, seq_len, decoder_hidden + encoder_hidden]
            concated = torch.cat([hidden_state, encoder_outputs], dim=-1)
            # fix: `.sequeeze` typo; torch.tanh replaces deprecated F.tanh
            attention_weight = self.Va(
                torch.tanh(self.Wa(concated.view(batch_size * encoder_seq_len, -1)))
            ).squeeze(-1)
            attention_weight = F.softmax(attention_weight.view(batch_size, encoder_seq_len), dim=-1)
        return attention_weight
