import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv, GCNConv


class Layer_AGG(nn.Module):
    """Aggregation layer combining a stacked GraphSAGE branch with a gated tree branch.

    The main branch applies ``num_layers`` SAGEConv layers over the base edge
    set (``edge_index[0]``).  The tree branch runs ``layers_tree`` independent
    SAGEConv layers, one per tree-level edge set (``edge_index[1][i]``), and
    fuses their outputs with a learned per-node softmax gate.  The result is
    ``main + weight * gated_tree_sum``.
    """

    def __init__(self, in_feat, out_feat, drop_rate=0.6, weight=1, num_layers=2, layers_tree=2):
        super(Layer_AGG, self).__init__()
        self.drop_rate = drop_rate
        self.weight = weight          # scale applied to the tree-branch output
        self.num_layers = num_layers
        self.layers_tree = layers_tree

        # Main stacked-SAGE branch: in_feat -> out_feat -> ... -> out_feat.
        self.convs = nn.ModuleList(
            SAGEConv(in_feat if idx == 0 else out_feat, out_feat)
            for idx in range(num_layers)
        )

        # One SAGEConv plus one scalar gate per tree level.
        self.conv_tree = nn.ModuleList(SAGEConv(in_feat, out_feat) for _ in range(layers_tree))
        self.gating_networks = nn.ModuleList(nn.Linear(out_feat, 1) for _ in range(layers_tree))

        # NOTE(review): not read in forward(); kept for state-dict compatibility.
        self.bias = nn.Parameter(torch.zeros(layers_tree))

    def forward(self, x, edge_index):
        # Keep the raw input features for the tree branch.
        h = x

        # --- main branch: stacked SAGE over edge_index[0] ---
        for idx, conv in enumerate(self.convs):
            x = conv(x, edge_index[0])
            if idx < self.num_layers - 1:  # the last layer gets no activation/dropout
                x = F.dropout(F.relu(x), p=self.drop_rate, training=self.training)

        # --- tree branch: one SAGE layer per tree-level edge set ---
        tree_feats = [
            F.dropout(F.relu(conv(h, edge_index[1][level])),
                      p=self.drop_rate, training=self.training)
            for level, conv in enumerate(self.conv_tree)
        ]

        # Per-node gate logits (one scalar per tree level), softmax-normalised
        # across levels; alpha has shape (N, 1, layers_tree).
        gate_logits = [gate(feat) for gate, feat in zip(self.gating_networks, tree_feats)]
        alpha = F.softmax(torch.stack(gate_logits, dim=-1), dim=-1)

        # Gated sum of the tree-level outputs.
        x_tree = torch.zeros_like(tree_feats[0])
        for level, feat in enumerate(tree_feats):
            x_tree = x_tree + feat * alpha[:, :, level]

        return x + self.weight * x_tree


class MetaSimilarity(nn.Module):
    """Auxiliary similarity-alignment loss.

    Projects node embeddings into a similarity space and penalises (via KL
    divergence) the mismatch between the learned pairwise similarity matrix
    and a target similarity derived from a shortest-path distance matrix.
    """

    def __init__(self, hidden_dim, temperature=0.5):
        """
        Args:
            hidden_dim: dimensionality of the input embeddings.
            temperature: softmax temperature applied to the learned similarities.
        """
        super().__init__()
        self.temperature = temperature
        # Two-layer MLP projecting embeddings into the similarity space.
        self.projection = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim)
        )

    def forward(self, z, dist_matrix):
        """Return the KL-divergence loss between learned and target similarity.

        Args:
            z: (N, hidden_dim) node embeddings.
            dist_matrix: (N, N) pairwise distances; entries <= 0 are treated
                as invalid/unreachable and contribute 0 target similarity.
        """
        # Learned pairwise similarity (temperature-scaled dot product).
        z_proj = self.projection(z)
        sim_matrix = torch.mm(z_proj, z_proj.t()) / self.temperature

        # Target similarity built from distances; no gradient flows here.
        with torch.no_grad():
            dist = dist_matrix.float()
            # Mark non-positive distances as NaN so they are ignored below.
            # full_like keeps the fill value on the same device/dtype as the
            # input (a bare torch.tensor(nan) lives on the CPU and can fail
            # for CUDA inputs).
            valid_dist = torch.where(dist > 0, dist, torch.full_like(dist, float('nan')))

            # Maximum over the valid (non-NaN) distances, used to normalise.
            finite = valid_dist[~torch.isnan(valid_dist)]
            if finite.numel() == 0:
                max_dist = 1.0  # all entries invalid: avoid division by zero
            else:
                max_dist = torch.max(finite)

            # Closer nodes -> similarity near 1; invalid pairs -> 0.
            target_sim = torch.exp(-valid_dist / max_dist)
            target_sim = torch.nan_to_num(target_sim, nan=0.0)

        # Row-wise KL divergence between learned and target distributions.
        loss = F.kl_div(
            F.log_softmax(sim_matrix, dim=-1),
            F.softmax(target_sim, dim=-1),
            reduction='batchmean'
        )
        return loss


# For multi-relation graphs
class multi_HOGRL_Model(nn.Module):
    """HOGRL model over a multi-relation graph.

    Runs one Layer_AGG tower per relation, concatenates the per-relation
    embeddings, classifies them with a linear head, and optionally computes
    a meta-similarity auxiliary loss on the concatenated embedding.
    """

    def __init__(self, in_feat, out_feat, relation_nums=3, hidden=32, drop_rate=0.6, weight=1, num_layers=2,
                 layers_tree=2, meta_weight=0.5):
        super(multi_HOGRL_Model, self).__init__()
        self.relation_nums = relation_nums
        self.drop_rate = drop_rate
        self.weight = weight
        self.layers_tree = layers_tree

        # One aggregation tower per relation, registered as attributes
        # Layers0..Layers{R-1}.  The attribute names are part of the
        # checkpoint layout, so they are kept as-is.
        for rel in range(relation_nums):
            tower = Layer_AGG(in_feat, hidden, self.drop_rate, self.weight, num_layers, self.layers_tree)
            setattr(self, 'Layers' + str(rel), tower)

        self.linear = nn.Linear(hidden * relation_nums, out_feat)

        # Auxiliary similarity-alignment module and its loss weight.
        self.meta_weight = meta_weight
        self.meta_sim = MetaSimilarity(hidden * relation_nums)

    def forward(self, x, edge_index, dist_matrix=None):
        """
        Args:
            x: node features shared by all relations.
            edge_index: per-relation edge structures; edge_index[i] is handed
                to the i-th Layer_AGG tower.
            dist_matrix: optional pairwise distance matrix; when given, the
                weighted meta-similarity loss is computed, otherwise it is 0.

        Returns:
            Tuple of (per-node log-probabilities, concatenated embedding,
            weighted meta loss).
        """
        per_relation = [
            getattr(self, 'Layers' + str(rel))(x, edge_index[rel])
            for rel in range(self.relation_nums)
        ]
        x_temp = torch.cat(per_relation, dim=1)

        # Auxiliary loss (zero tensor when no distance matrix is supplied).
        meta_loss = torch.tensor(0.).to(x.device)
        if dist_matrix is not None:
            meta_loss = self.meta_sim(x_temp, dist_matrix) * self.meta_weight

        logits = self.linear(x_temp)
        return F.log_softmax(logits, dim=1), x_temp, meta_loss


class Graphsage(nn.Module):
    """Baseline two-layer GraphSAGE classifier with a 2-way linear head."""

    def __init__(self, in_feat, out_feat):
        super(Graphsage, self).__init__()
        self.conv1 = SAGEConv(in_feat, out_feat)
        self.conv2 = SAGEConv(out_feat, out_feat)
        self.linear = nn.Linear(out_feat, 2)

    def forward(self, x, edge_index):
        """Return per-node log-probabilities over the two classes."""
        # Both conv layers use the same ReLU + dropout(0.6) post-processing.
        for conv in (self.conv1, self.conv2):
            x = conv(x, edge_index)
            x = F.dropout(F.relu(x), p=0.6, training=self.training)
        return F.log_softmax(self.linear(x), dim=1)