from torch.nn import Linear, ReLU, Parameter
from torch_geometric.nn import MessagePassing
import torch
import torch.nn as nn

# Attention layer

# propagate(): f_att(eps_i, eps_N_i) = eps_i + sum(a_ij * eps_j)

#     message(): a_ij * eps_j
#     aggregate(): sum()
#     update: eps_i + aggr_out


class Att_layer(MessagePassing):
    """Additive-attention message-passing layer.

    Implements f_att(eps_i, eps_N_i) = eps_i + sum_j(a_ij * eps_j), where the
    edge weight a_ij = sigmoid(attn_source(eps_i) + attn_target(eps_j)) is
    computed independently per head (H heads of C channels each).
    """

    def __init__(self, in_channels=32, H=2, C=32):
        """
        Args:
            in_channels: dimensionality of the input node features.
            H: number of attention heads.
            C: channels per head (the layer outputs H * C features per node).
        """
        super().__init__(aggr="add")  # neighbor messages are summed
        self.H = H
        self.C = C
        self.linear_source = Linear(in_channels, H * C, bias=True)
        self.linear_target = Linear(in_channels, H * C, bias=True)
        self.activation = ReLU()
        self.attn_source = MultiHeadLinear(num_channels=C, heads=H, bias=True)
        self.attn_target = MultiHeadLinear(num_channels=C, heads=H, bias=True)

    def forward(self, x, edge_index):
        """Compute attended node features.

        Args:
            x: node feature matrix of shape [N, in_channels].
            edge_index: COO edge indices of shape [2, E].

        Returns:
            Updated node features of shape [N, H * C].
        """
        H, C = self.H, self.C
        # Separate source/target projections. Linear does not modify its
        # input, so the former defensive copies (`x * 1`) were unnecessary.
        x_s = self.linear_source(x).view(-1, H, C)
        x_t = self.linear_target(x).view(-1, H, C)
        x_s, x_t = self.activation(x_s), self.activation(x_t)
        # Per-head scalar attention logits, shape [N, H].
        alpha_s = self.attn_source(x_s)
        alpha_t = self.attn_target(x_t)
        x_s = x_s.view(-1, H * C)
        x_t = x_t.view(-1, H * C)

        # NOTE: x_source was previously passed to propagate() but never used
        # by message(); dropping it avoids a needless per-edge gather.
        out = self.propagate(
            edge_index,
            x_target=x_t,
            alpha_source=alpha_s,
            alpha_target=alpha_t,
        ).view(-1, H, C)

        # Residual connection with the node's own (source-projected) features.
        out = out + x_s.view(-1, H, C)
        return out.view(-1, H * C)

    def message(self, x_target_j, alpha_source_i, alpha_target_j):
        """Weight each neighbor's target-projected features by attention.

        a_ij = sigmoid(alpha_source_i + alpha_target_j), per head.
        """
        alpha = torch.sigmoid(alpha_source_i + alpha_target_j)  # [E, H]
        x_target_j = x_target_j.view(-1, self.H, self.C)
        return (x_target_j * alpha.unsqueeze(-1)).view(-1, self.H * self.C)


class MultiHeadLinear(nn.Module):
    """Per-head linear scoring: maps [..., heads, channels] -> [..., heads].

    For each head h the output is dot(x[..., h, :], weight[h]) (+ bias[h]
    when a bias is enabled).
    """

    def __init__(self, num_channels, heads=1, bias=False):
        """
        Args:
            num_channels: number of channels per head.
            heads: number of independent heads.
            bias: whether to add a learnable per-head bias.
        """
        super().__init__()
        self.num_channels = num_channels
        self.heads = heads

        # Bug fix: torch.Tensor(...) allocates *uninitialized* memory, so the
        # parameters previously started from garbage values; allocate with
        # torch.empty and initialize explicitly in reset_parameters().
        self.weight = Parameter(torch.empty(heads, num_channels))
        if bias:
            self.bias = Parameter(torch.empty(heads))
        else:
            # Bug fix: the `bias` flag was previously ignored and a bias
            # parameter was always created.
            self.register_parameter("bias", None)
        self.reset_parameters()

    def reset_parameters(self):
        """(Re-)initialize parameters: Xavier-uniform weights, zero bias."""
        nn.init.xavier_uniform_(self.weight)
        if self.bias is not None:
            nn.init.zeros_(self.bias)

    def forward(self, x):
        """Reduce x of shape [..., heads, channels] to scores [..., heads]."""
        out = (x * self.weight).sum(dim=-1)
        if self.bias is not None:
            out = out + self.bias
        return out
