"""
实现attention
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import chatbot.config as config


class Attention(nn.Module):
    """Luong-style attention producing normalized weights over encoder steps.

    Supported scoring methods:
      - "dot":     score = encoder_output · hidden (requires encoder and
                   decoder hidden sizes to match)
      - "general": score = (Wa · encoder_output) · hidden
      - "concat":  score = va · tanh(Wa · [hidden ; encoder_output])
    """

    def __init__(self, method="general"):
        """
        :param method: one of "dot", "general", "concat"
        """
        super(Attention, self).__init__()
        assert method in ["dot", "general", "concat"], "method error"
        self.method = method
        if self.method == "general":
            # Project encoder outputs into the decoder hidden space so they
            # can be scored against the decoder hidden state via bmm.
            self.wa = nn.Linear(config.chatbot_encoder_hidden_size, config.chatbot_decoder_hidden_size)

        if self.method == "concat":
            # Scores the concatenation [hidden ; encoder_output].
            self.wa = nn.Linear(config.chatbot_encoder_hidden_size + config.chatbot_decoder_hidden_size,
                                config.chatbot_decoder_hidden_size, bias=False)
            self.va = nn.Linear(config.chatbot_decoder_hidden_size, 1)

    def forward(self, hidden_state, encoder_outputs):
        """
        :param hidden_state: [num_layers, batch_size, decoder_hidden_size];
            only the last layer's state is used for scoring
        :param encoder_outputs: [batch_size, seq_len, encoder_hidden_size]
        :return: attention weights [batch_size, seq_len], softmax-normalized
            over seq_len
        """
        if self.method == "dot":
            # [batch_size, hidden_size, 1]
            hidden_state = hidden_state[-1, :, :].unsqueeze(-1)
            # [batch_size, seq_len]
            attention_weight = encoder_outputs.bmm(hidden_state).squeeze(-1)
        elif self.method == "general":
            # [batch_size, seq_len, decoder_hidden_size]
            encoder_outputs = self.wa(encoder_outputs)
            # [batch_size, decoder_hidden_size, 1]
            hidden_state = hidden_state[-1, :, :].unsqueeze(-1)
            # [batch_size, seq_len]
            attention_weight = encoder_outputs.bmm(hidden_state).squeeze(-1)
        else:  # concat
            # Tile the last layer's hidden state across the sequence.
            # BUGFIX: the original did squeeze(0) + repeat(1, seq_len, 1) on a
            # 2-D tensor, which yields [1, batch*seq_len, hid] and crashes the
            # torch.cat below for any batch_size > 1; unsqueeze(1) tiles along
            # a real sequence dimension instead.
            seq_len = encoder_outputs.size(1)
            # [batch_size, seq_len, decoder_hidden_size]
            hidden_state = hidden_state[-1].unsqueeze(1).repeat(1, seq_len, 1)
            # [batch_size, seq_len, encoder_hidden_size + decoder_hidden_size]
            concated = torch.cat([hidden_state, encoder_outputs], dim=-1)
            # nn.Linear applies to the trailing dim of N-D input, so no
            # view/reshape round-trip is needed; torch.tanh replaces the
            # deprecated F.tanh.
            attention_weight = self.va(torch.tanh(self.wa(concated))).squeeze(-1)  # [batch_size, seq_len]

        return F.softmax(attention_weight, dim=-1)  # [batch_size, seq_len]
