import torch
from torch import nn
from torch.nn import Linear

from config import MyModelConfig

# Select the compute device once at import time: first CUDA GPU if present,
# otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")


class Attention(nn.Module):
    """Additive (MLP-scored) attention pooling over a sequence.

    ``forward`` takes a ``(q, k, v)`` triple:
      * ``q`` — (batch, maxlen, dim) sequence representations,
      * ``k`` — (batch, dim) single query/candidate representation,
      * ``v`` — (batch, maxlen, dim) value sequence the attention weights are
        applied to; assumed to share ``maxlen`` with ``q`` — TODO(review)
        confirm against callers.

    Returns the attention-weighted sum of ``v`` with shape (batch, dim).
    """

    def __init__(self, att_hidden_units=(64, 16)):
        super(Attention, self).__init__()
        # The score MLP consumes [q, k, q-k, q*k], hence 4x the embedding dim.
        if MyModelConfig.use_title_entity:
            in_dim = MyModelConfig.entity_embedding_dim * 4
        else:
            in_dim = MyModelConfig.word_embedding_dim * 4
        self.att_linear_a = Linear(in_features=in_dim,
                                   out_features=att_hidden_units[0])
        # NOTE(review): there is no activation between the stacked Linear
        # layers below, so a->b->final collapses to a single affine map.
        # Kept as-is to preserve behavior/checkpoints; consider adding one.
        self.att_linear_b = Linear(in_features=att_hidden_units[0], out_features=att_hidden_units[1])
        self.att_final_linear = Linear(in_features=att_hidden_units[-1], out_features=1)

    def forward(self, inputs):
        q, k, v = inputs
        # Broadcast the single query k along the sequence axis of q:
        # (batch, dim) -> (batch, maxlen, dim).
        k = torch.repeat_interleave(torch.unsqueeze(k, 1), q.shape[1], 1)
        # BUG FIX: the original did `v = k = repeat(...)`, discarding the
        # caller-supplied v.  Every row of the repeated k is identical and the
        # softmax weights sum to 1, so bmm(weights, v) returned k unchanged —
        # the attention was a no-op.  Keep v as the value sequence instead.
        info = torch.cat([q, k, q - k, q * k], dim=-1)
        info = self.att_linear_a(info)
        info = self.att_linear_b(info)

        # (batch, maxlen, 1) raw attention scores
        outputs = self.att_final_linear(info)

        # (batch, maxlen)
        outputs = torch.squeeze(input=outputs, dim=-1)

        # Normalize scores into weights over the sequence axis.
        outputs = torch.softmax(outputs, dim=-1)
        outputs = torch.unsqueeze(input=outputs, dim=1)

        # Weighted sum over v: (batch, 1, maxlen) x (batch, maxlen, dim)
        # -> (batch, 1, dim) -> (batch, dim)
        outputs = torch.bmm(outputs, v)
        outputs = torch.squeeze(input=outputs, dim=1)
        return outputs
