# import torch.nn as nn
# import torch as t
# from torch.nn import functional as F
# class ATT(nn.Module):
#     def __init__(self, attention_factor=4, l2_reg_w=0, keep_prob=0.5, seed=1024, **kwargs):
#         super(ATT, self).__init__()
#         self.attention_factor = attention_factor
#         self.l2_reg_w = l2_reg_w
#         self.keep_prob = keep_prob
#         self.seed = seed
#
#
#
#
#     def forward(self, inputs, **kwargs):
#
#         self.attention_W = (t.randn(inputs.shape[-1], self.attention_factor)).cuda()
#         self.attention_b = (t.zeros(self.attention_factor, )).cuda()
#         self.projection_h = (t.randn(self.attention_factor, 1)).cuda()
#         inner_product = inputs
#         bi_interaction = inner_product
#         attention_temp = F.relu(t.add(t.dot(bi_interaction, self.attention_W)), self.attention_b)
#         self.normalizer_att_socre = nn.Softmax(t.dot(attention_temp, self.projection_h), dim=1)
#         attention_output = self.normalizer_att_socre * bi_interaction
#
#         return attention_output
#
#     def compute_output_shape(self, input_shape):
#         if not isinstance(input_shape, list):
#             raise ValueError('A "AFMLayer" layer should be called on a list of inputs')
#
#         return (None, self.attention_factor)
#
#     def get_config(self, ):
#         config = {'attention_factor': self.attention_factor,
#                   'l2_reg_w': self.l2_reg_w, 'keep_prob': self.keep_prob, 'seed': self.seed}
#         base_config = super(ATT, self).get_config()
#
#         return dict(list(base_config.items()) + list(config.items()))


import torch
import torch.nn as nn
import torch.nn.functional as F


# class ATT(nn.Module):
#     def __init__(self, attention_factor=4, l2_reg_w=0, keep_prob=0.5, seed=1024):
#         super(ATT, self).__init__()
#         self.attention_factor = attention_factor
#         self.l2_reg_w = l2_reg_w
#         self.keep_prob = keep_prob
#         self.seed = seed
#         embedding_size = None  # 初始化为None
#         # 初始化权重和偏置
#         # self.attention_W = nn.Parameter(torch.Tensor(embedding_size, self.attention_factor)).cuda()
#         # self.attention_b = nn.Parameter(torch.Tensor(self.attention_factor)).cuda()
#         # self.projection_h = nn.Parameter(torch.Tensor(self.attention_factor, 1)).cuda()
#
#
#
#     def forward(self, inputs):
#         # inner_product = inputs
#         # inner_product = inner_product.unsqueeze(1)
#         # bi_interaction = inner_product
#         #
#         # attention_temp = F.relu(torch.bmm(bi_interaction, self.attention_W.unsqueeze(0).expand(bi_interaction.size(0), -1, -1)) + self.attention_b)
#         # self.normalized_att_score = F.softmax(torch.bmm(attention_temp, self.projection_h), dim=1)
#         # attention_output = self.normalized_att_score * bi_interaction
#         # return attention_output
#         inner_product = inputs
#         # 增加一个维度使其成为三维张量
#         bi_interaction = inner_product
#         l1 = nn.Linear(inputs.shape[-1], self.attention_factor).cuda()
#         attention_temp = l1(bi_interaction)
#         attention_temp = F.relu(attention_temp)
#
#         # attention_temp = F.relu(torch.bmm(bi_interaction,
#         #                                   self.attention_W.unsqueeze(0).expand(bi_interaction.size(0), -1,
#         #                                                                        -1)) + self.attention_b)
#
#         # 增加一个维度使其成为三维张量
#         # projection_h = self.projection_h.unsqueeze(0).expand(attention_temp.size(0), -1, -1)
#         l2 = nn.Linear(self.attention_factor, 1).cuda()
#         projection_h = l2(attention_temp)
#         normalized_att_score = F.softmax(projection_h, dim=1)
#         attention_output = normalized_att_score * bi_interaction
#         return attention_output
#
#     def compute_output_shape(self, input_shape):
#         return (None, self.attention_factor)




import torch
import torch.nn as nn
import torch.nn.functional as F



class ATT(nn.Module):
    """AFM-style element-wise attention layer.

    Projects the input through a small MLP (embedding_size -> attention_factor
    -> embedding_size), softmax-normalizes the result over ``dim=1``, and
    scales the input element-wise by those attention scores.

    The two linear sublayers are built lazily on the first forward pass so the
    embedding size can be inferred from the input tensor.

    Args:
        attention_factor: Hidden width of the attention MLP.
        l2_reg_w: Unused; kept for backward compatibility with earlier drafts.
        keep_prob: Unused; kept for backward compatibility (no dropout applied).
        seed: Unused; kept for backward compatibility (init uses global RNG).
    """

    def __init__(self, attention_factor=4, l2_reg_w=0, keep_prob=0.5, seed=1024):
        super(ATT, self).__init__()
        self.attention_factor = attention_factor
        # NOTE(review): the three attributes below are stored but never read
        # by this implementation — retained so existing callers keep working.
        self.l2_reg_w = l2_reg_w
        self.keep_prob = keep_prob
        self.seed = seed
        # Built lazily in forward() once the embedding size is known.
        self.projection_h = None
        self.attention_layer = None

    def forward(self, inputs):
        """Apply element-wise attention to ``inputs``.

        Args:
            inputs: Tensor whose last dimension is the embedding size. Softmax
                is taken over ``dim=1``, so inputs is assumed to be at least
                2-D, e.g. (batch, fields, embedding) — TODO confirm with callers.

        Returns:
            Tensor of the same shape as ``inputs``: softmax attention scores
            multiplied element-wise with the input.
        """
        embedding_size = inputs.shape[-1]
        bi_interaction = inputs

        # Lazy initialization: layer shapes depend on embedding_size, which is
        # only known at the first call.
        # BUG FIX: the layers were hard-coded to .cuda(), which crashed on
        # CPU-only machines and ignored the input's actual device; follow
        # inputs.device instead.
        # NOTE(review): parameters created here are invisible to any optimizer
        # constructed before the first forward pass — verify training setup.
        if self.attention_layer is None:
            self.attention_layer = nn.Linear(
                embedding_size, self.attention_factor
            ).to(inputs.device)
            nn.init.xavier_uniform_(self.attention_layer.weight)
        if self.projection_h is None:
            self.projection_h = nn.Linear(
                self.attention_factor, embedding_size, bias=False
            ).to(inputs.device)
            nn.init.xavier_uniform_(self.projection_h.weight)

        attention_temp = F.leaky_relu(self.attention_layer(bi_interaction))
        # Normalize scores across dim=1 (the field axis, presumably).
        normalized_att_score = F.softmax(
            F.leaky_relu(self.projection_h(attention_temp)), dim=1
        )

        attention_output = normalized_att_score * bi_interaction

        return attention_output



