import torch.nn as nn
import torch
import torch.nn.functional as F
# Attention-mechanism layer definition
class Atten_model(torch.nn.Module):
    """Single-head element-wise attention layer.

    Projects the input into key/query/value spaces of size ``out_dim``,
    forms element-wise (Hadamard, not matmul) attention weights via a
    scaled softmax over dim 1, and returns the ReLU of the
    attention-weighted values.

    Args:
        in_dim: size of the last dimension of the input.
        out_dim: size of the last dimension of the output.
    """

    def __init__(self, in_dim, out_dim):
        super(Atten_model, self).__init__()
        # k, q, v each expand the input from in_dim to out_dim features.
        self.k = nn.Linear(in_dim, out_dim)
        self.q = nn.Linear(in_dim, out_dim)
        self.v = nn.Linear(in_dim, out_dim)
        self.relu = nn.ReLU()

    def forward(self, x):
        k = self.k(x)
        q = self.q(x)
        v = self.v(x)
        # Scale by sqrt(d) as in scaled dot-product attention.  A plain
        # Python float avoids torch.sqrt on an integer tensor, which fails
        # on older PyTorch versions ("sqrt" not implemented for 'Long')
        # and allocates a throwaway tensor on newer ones.
        atten = F.softmax((k * q) / (v.shape[1] ** 0.5), dim=1)
        # Attention-weighted values, then non-linearity.
        out = atten * v
        return self.relu(out)


class NeuralNet(torch.nn.Module):
    """Two stacked attention layers, a linear head, and a sigmoid output.

    Maps ``(..., in_dim)`` inputs to ``(..., out_dim)`` values in (0, 1).
    """

    def __init__(self, in_dim, out_dim):
        super(NeuralNet, self).__init__()
        self.layer_1 = Atten_model(in_dim, 10)
        self.layer_2 = Atten_model(10, 20)
        self.linear = nn.Linear(20, out_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Chain the two attention blocks, project, then squash to (0, 1).
        hidden = self.layer_2(self.layer_1(x))
        return self.sigmoid(self.linear(hidden))

