"""
attention机制
"""
import torch.nn as nn
import torch

import config


class Attention(nn.Module):
    """Luong-style attention over encoder outputs.

    Scores every encoder time step against the current decoder hidden
    state and returns softmax-normalized weights of shape
    [batch_size, seq_len].

    Supported scoring methods (Luong et al., 2015):
        dot:     score = enc · dec
        general: score = enc · Wa(dec)
        concat:  score = Va(tanh(Wa([enc; dec])))
    """

    def __init__(self, method="general"):
        super(Attention, self).__init__()
        assert method in ["dot", "general", "concat"], "传入的方法无效"
        self.method = method
        if self.method == "dot":
            # Plain dot product needs no learned parameters.
            pass
        elif self.method == "general":
            # Projects the decoder hidden state before the dot product.
            self.Wa = nn.Linear(config.chatbot_decoder_hidden_size,
                                config.chatbot_decoder_hidden_size)
        else:
            # "concat": Wa mixes the concatenated [encoder; decoder]
            # features; Va collapses each time step to one scalar score.
            self.Wa = nn.Linear(config.chatbot_decoder_hidden_size * 2,
                                config.chatbot_decoder_hidden_size)
            # BUG FIX: Va must map hidden_size -> 1 so concat scores come
            # out as [batch_size, seq_len, 1]. The original mapped to
            # hidden_size, so squeeze(-1) was a no-op and softmax was
            # applied over the hidden axis instead of seq_len.
            self.Va = nn.Linear(config.chatbot_decoder_hidden_size, 1)

    def forward(self, encoder_output, decoder_hidden):
        """Dispatch to the configured scoring method.

        encoder_output: [batch_size, seq_len, hidden_size]
        decoder_hidden: [1, batch_size, hidden_size]
        returns:        [batch_size, seq_len] attention weights
        """
        if self.method == "dot":
            return self.dot_score(encoder_output, decoder_hidden)
        elif self.method == "general":
            return self.general_score(encoder_output, decoder_hidden)
        else:
            return self.concat_score(encoder_output, decoder_hidden)

    def dot_score(self, encoder_output, decoder_hidden):
        """score = enc · dec.

        encoder_output: [batch_size, seq_len, hidden_size]
        decoder_hidden: [1, batch_size, hidden_size]
        returns:        [batch_size, seq_len]
        """
        # [1, batch, hidden] -> [batch, hidden, 1] for batched matmul.
        decoder_hidden = decoder_hidden.permute(1, 2, 0)

        # [batch, seq, hidden] @ [batch, hidden, 1] -> [batch, seq, 1]
        attention_weights = torch.bmm(encoder_output, decoder_hidden)

        # Normalize over seq_len -> [batch_size, seq_len].
        return nn.functional.softmax(attention_weights.squeeze(-1), dim=-1)

    def general_score(self, encoder_output, decoder_hidden):
        """score = enc · Wa(dec).

        encoder_output: [batch_size, seq_len, hidden_size]
        decoder_hidden: [1, batch_size, hidden_size]
        returns:        [batch_size, seq_len]
        """
        # Project the decoder state, then shape for batched matmul:
        # [1, batch, hidden] -> [batch, hidden] -> [batch, hidden, 1]
        decoder_hidden = self.Wa(decoder_hidden.squeeze(0)).unsqueeze(-1)

        # [batch, seq, hidden] @ [batch, hidden, 1] -> [batch, seq, 1]
        attention_weights = torch.bmm(encoder_output, decoder_hidden)

        return nn.functional.softmax(attention_weights.squeeze(-1), dim=-1)

    def concat_score(self, encoder_output, decoder_hidden):
        """score = Va(tanh(Wa([enc; dec]))).

        encoder_output: [batch_size, seq_len, hidden_size]
        decoder_hidden: [1, batch_size, hidden_size]
        returns:        [batch_size, seq_len]
        """
        seq_len = encoder_output.size(1)

        # BUG FIX: the original used squeeze(0).repeat(1, seq_len, 1) on a
        # 2-D tensor; repeat implicitly prepends a dim, producing
        # [1, batch*seq_len, hidden] and breaking the cat below for
        # batch_size > 1. Broadcast to [batch, seq_len, hidden] instead.
        decoder_hidden = decoder_hidden.permute(1, 0, 2).expand(-1, seq_len, -1)

        # concat_data: [batch_size, seq_len, hidden_size * 2]
        concat_data = torch.cat([encoder_output, decoder_hidden], dim=-1)

        # energy: [batch_size, seq_len, hidden_size]
        energy = torch.tanh(self.Wa(concat_data))

        # attention_weights: [batch_size, seq_len, 1]
        attention_weights = self.Va(energy)

        # [batch_size, seq_len]
        return nn.functional.softmax(attention_weights.squeeze(-1), dim=-1)
