import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import GraphAttentionLayer, SpGraphAttentionLayer

class GAT(nn.Module):
    """Dense multi-head Graph Attention Network emitting one scalar per node."""

    def __init__(self, nfeat, nhid, nclass, dropout_input, dropout_hidden, alpha, nheads):
        """Dense version of GAT.

        nfeat: input feature dimension per node.
        nhid: hidden size produced by each attention head.
        nclass: accepted for interface parity; the output width is fixed at 1.
        dropout_input / dropout_hidden: dropout rates for the raw input
            features and the hidden representation, respectively.
        alpha: negative slope of the LeakyReLU activations.
        nheads: number of parallel attention heads.
        """
        super(GAT, self).__init__()
        self.alpha = alpha
        self.dropout_input = dropout_input
        self.dropout_hidden = dropout_hidden
        self.leakyrelu = nn.LeakyReLU(self.alpha)

        # Heads live in a plain list; add_module() registers each one under
        # 'attention_<i>' so its parameters reach state_dict / the optimizer.
        self.attentions = [
            GraphAttentionLayer(nfeat, nhid, dropout=dropout_hidden, alpha=alpha, concat=True)
            for _ in range(nheads)
        ]
        for idx, head in enumerate(self.attentions):
            self.add_module('attention_{}'.format(idx), head)

        self.bn1 = nn.BatchNorm1d(nhid * nheads)

        # Single output attention layer collapsing the fused heads to width 1.
        self.out_att = GraphAttentionLayer(
            nhid * nheads, 1, dropout=dropout_hidden, alpha=alpha,
            concat=False, is_output_layer=True,
        )

    def forward(self, x, adj):
        """Run every head on (x, adj), fuse, and return a per-node scalar."""
        h = F.dropout(x, self.dropout_input, training=self.training)
        per_head = [head(h, adj) for head in self.attentions]
        h = torch.cat(per_head, dim=1)
        h = self.leakyrelu(self.bn1(h))
        h = F.dropout(h, self.dropout_hidden, training=self.training)
        h = self.leakyrelu(self.out_att(h, adj))
        return h.squeeze(-1)

class ResGAT(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout_input, dropout_hidden, alpha, nheads):
        """Dense version of GAT with a residual connection around the heads.

        nfeat: input feature dimension per node.
        nhid: hidden size produced by each attention head.
        nclass: accepted for interface parity; the output width is fixed at 1.
        dropout_input / dropout_hidden: dropout rates for the raw input
            features and the hidden representation, respectively.
        alpha: negative slope of the LeakyReLU activations.
        nheads: number of parallel attention heads.
        """
        super(ResGAT, self).__init__()
        self.alpha = alpha
        self.dropout_input = dropout_input
        self.dropout_hidden = dropout_hidden
        self.leakyrelu = nn.LeakyReLU(self.alpha)

        # Projects the raw input up to the concatenated-head width so it can
        # be added as a residual to the attention output.
        self.input_proj = nn.Linear(nfeat, nhid * nheads)
        # NOTE(review): output_proj is registered but never used in forward();
        # kept so existing checkpoints still load — confirm before removing.
        self.output_proj = nn.Linear(nhid * nheads, 1)
        self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout_hidden,
                                             alpha=alpha, concat=True) for _ in range(nheads)]
        for i, attention in enumerate(self.attentions):
            self.add_module('attention_{}'.format(i), attention)
        self.bn1 = nn.BatchNorm1d(nhid * nheads)
        self.out_att = GraphAttentionLayer(nhid * nheads, 1, dropout=dropout_hidden,
                                         alpha=alpha, concat=False, is_output_layer=True)

    # Consider making the model slightly more complex (hill-climb-like curve).
    def forward(self, x, adj):
        """Attention heads + residual input projection, then a scalar per node."""
        original_x = x
        x = F.dropout(x, self.dropout_input, training=self.training)
        x_att = torch.cat([att(x, adj) for att in self.attentions], dim=1)
        x_att = self.bn1(x_att)
        x_att = self.leakyrelu(x_att)
        # Residual branch: raw input projected to the concatenated-head width.
        x_res = self.input_proj(original_x)
        x = x_att + x_res
        # Bug fix: this is the hidden representation, so apply dropout_hidden
        # (previously dropout_input), matching GAT.forward at the same stage.
        x = F.dropout(x, self.dropout_hidden, training=self.training)
        x = self.out_att(x, adj)
        x = self.leakyrelu(x)
        return x.squeeze(-1)

# class MLP(nn.Module):
#     def __init__(self, nfeat, alpha=None, nheads = 1, mean_weight=0.5):
#         super(MLP, self).__init__()
#         self.mean_weight = mean_weight  # 均值特征的权重系数
#         self.alpha = alpha
#         self.fc1 = nn.Linear(nfeat, 64)
#         self.sigmoid = nn.Sigmoid()
#         self.bn = nn.BatchNorm1d(64)
#         self.fc2 = nn.Linear(64, 64)
#         self.fc3 = nn.Linear(64, 64)
#         self.leakyrelu = nn.LeakyReLU(self.alpha)
#         self.out_fc = nn.Linear(64, 1)
        
#     def forward(self, x):
#         mean_feature = torch.mean(x, dim=1, keepdim=True)
#         x = self.fc1(x)
#         x = self.sigmoid(x)
#         x = self.bn(x)
#         x = self.fc2(x)
#         x = self.sigmoid(x)
#         x = self.bn(x)
#         x = self.fc3(x)
#         x = self.leakyrelu(x)
#         x = self.out_fc(x)
#         output = (1 - self.mean_weight) * x + self.mean_weight * mean_feature
#         return output.squeeze(-1)  

# class EnhancedMLP(nn.Module):
#     def __init__(self, nfeat, dropout_rate=0.3, mean_weight=0.7):
#         super(EnhancedMLP, self).__init__()
#         self.mean_weight = mean_weight  # 均值特征的权重系数
        
#         # 主网络
#         self.fc1 = nn.Linear(nfeat, 32)  
#         self.bn1 = nn.BatchNorm1d(32)
#         self.relu = nn.ReLU()
#         self.dropout = nn.Dropout(dropout_rate)
#         self.fc2 = nn.Linear(32, 16)
#         self.bn2 = nn.BatchNorm1d(16)
#         self.fc3 = nn.Linear(16, 1)
#         self.mean_weight = mean_weight
        
#     def forward(self, x):
#         # 计算特征均值
#         mean_feature = torch.mean(x, dim=1, keepdim=True)
        
#         # 主网络前向传播
#         h = self.fc1(x)
#         h = self.bn1(h)
#         h = self.relu(h)
#         h = self.dropout(h)
#         h = self.fc2(h)
#         h = self.bn2(h)
#         h = self.relu(h)
#         h = self.fc3(h)
        
#         # 结合神经网络输出和均值预测
#         output = (1 - self.mean_weight) * h + self.mean_weight * mean_feature
        
#         return output.squeeze(-1)

class MLP(nn.Module):
    def __init__(self, nfeat, nhid, dropout_input, dropout_hidden, alpha=None, nheads=1):
        """MLP baseline with the same call signature as the GAT models.

        - nfeat: input feature dimension.
        - nhid: hidden size produced by each linear "head".
        - dropout_input: dropout rate applied to the raw input features.
        - dropout_hidden: dropout rate applied to the hidden representation.
        - alpha: unused; accepted for interface parity with GAT / ResGAT.
        - nheads: number of parallel linear heads whose outputs are concatenated.
        """
        super(MLP, self).__init__()
        self.dropout_input = dropout_input
        self.dropout_hidden = dropout_hidden

        # Parallel linear layers stand in for the multi-head attention layers;
        # their ELU-activated outputs are concatenated before the output layer.
        self.fcs = nn.ModuleList([nn.Linear(nfeat, nhid) for _ in range(nheads)])
        self.out_fc = nn.Linear(nhid * nheads, 1)  # scalar per node, as in GAT

    def forward(self, x, adj=None):
        """Forward pass; `adj` is accepted but ignored (interface parity)."""
        x = F.dropout(x, self.dropout_input, training=self.training)
        x = torch.cat([F.elu(fc(x)) for fc in self.fcs], dim=1)
        x = F.dropout(x, self.dropout_hidden, training=self.training)
        x = self.out_fc(x)
        return x.squeeze(-1)
