import math
from typing import Optional

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
import torch.nn.functional as F
from torch import Tensor
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.utils import softmax

class TransformerConv(MessagePassing):
    """Multi-head graph transformer convolution with "traffic" node boosting.

    Dot-product attention over graph edges. Nodes whose label contains the
    substring ``"traffic"`` have their Q/K/V projections scaled by
    ``1 + boost_factor`` so traffic-related nodes attract more attention mass.

    After each forward pass the (pre-dropout) attention weights are kept in
    ``self.alpha`` with shape ``[num_edges, heads]`` for inspection or
    visualization.

    Args:
        in_channels: Size of each input node feature.
        out_channels: Size of each output feature per head.
        heads: Number of attention heads.
        concat: If ``True``, concatenate head outputs (``heads * out_channels``);
            otherwise average them (``out_channels``).
        beta: Stored but currently unused.  # NOTE(review): dead option — confirm intent.
        dropout: Dropout probability applied to the attention weights.
        edge_dim: Dimensionality of edge features, or ``None`` if absent.
        bias: Whether the skip-connection projection has a bias term.
        root_weight: If ``True``, add a linear skip connection from the input.
        boost_factor: Extra scaling applied to Q/K/V of "traffic" nodes.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        heads: int = 1,
        concat: bool = True,
        beta: bool = False,
        dropout: float = 0.0,
        edge_dim: Optional[int] = None,
        bias: bool = True,
        root_weight: bool = True,
        boost_factor: float = 1.5,  # boost applied to "traffic" nodes
        **kwargs,
    ):
        kwargs.setdefault("aggr", "add")
        super().__init__(node_dim=0, **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = heads
        self.concat = concat
        self.beta = beta  # NOTE(review): stored but never read below
        self.dropout = dropout
        self.edge_dim = edge_dim
        self.root_weight = root_weight
        self.boost_factor = boost_factor

        # Linear projections for Q, K, V (all heads fused into one matmul).
        self.lin_query = Linear(in_channels, heads * out_channels, bias=False)
        self.lin_key = Linear(in_channels, heads * out_channels, bias=False)
        self.lin_value = Linear(in_channels, heads * out_channels, bias=False)

        # Edge-feature projection (only when edge features exist).
        if edge_dim is not None:
            self.lin_edge = Linear(edge_dim, heads * out_channels, bias=False)
        else:
            self.lin_edge = None

        # Skip-connection projection; output width must match `out` below.
        if concat:
            self.lin_skip = Linear(in_channels, heads * out_channels, bias=bias)
        else:
            self.lin_skip = Linear(in_channels, out_channels, bias=bias)

        self.reset_parameters()

    def reset_parameters(self):
        """Reinitialize all learnable sub-modules."""
        self.lin_query.reset_parameters()
        self.lin_key.reset_parameters()
        self.lin_value.reset_parameters()
        # Fix: explicit None check instead of relying on Module truthiness.
        if self.lin_edge is not None:
            self.lin_edge.reset_parameters()
        self.lin_skip.reset_parameters()

    def forward(
        self,
        x: Tensor,
        edge_index: Tensor,
        edge_attr: Optional[Tensor] = None,
        node_labels: Optional[list] = None,
    ) -> Tensor:
        """Run attention-based message passing over ``edge_index``.

        Args:
            x: Node features of shape ``[num_nodes, in_channels]``.
            edge_index: Edge indices of shape ``[2, num_edges]``.
            edge_attr: Optional edge features of shape ``[num_edges, edge_dim]``.
            node_labels: Optional per-node labels; labels containing
                "traffic" trigger the Q/K/V boost.

        Returns:
            Node embeddings of shape ``[num_nodes, heads * out_channels]``
            when ``concat`` is ``True``, else ``[num_nodes, out_channels]``.

        Raises:
            ValueError: If ``node_labels`` length differs from ``num_nodes``.
        """
        H, C = self.heads, self.out_channels

        # Validate label list length up front to fail fast with a clear error.
        if node_labels is not None and len(node_labels) != x.size(0):
            raise ValueError(
                f"The size of node_labels ({len(node_labels)}) does not match the number of nodes ({x.size(0)})"
            )

        # Project and reshape to [num_nodes, heads, out_channels].
        query = self.lin_query(x).view(-1, H, C)
        key = self.lin_key(x).view(-1, H, C)
        value = self.lin_value(x).view(-1, H, C)

        if node_labels is not None:
            # Boolean mask over nodes whose label mentions "traffic".
            traffic_mask = torch.tensor(
                ["traffic" in str(label) for label in node_labels],
                dtype=torch.bool,
                device=query.device,
            )

            if traffic_mask.any():
                # Broadcast mask to [N, 1, 1] and scale boosted nodes by
                # (1 + boost_factor); non-traffic nodes are left unchanged.
                boost = traffic_mask.unsqueeze(1).unsqueeze(2)
                query = query * (1 + self.boost_factor * boost)
                key = key * (1 + self.boost_factor * boost)
                value = value * (1 + self.boost_factor * boost)

        self.alpha = None  # reset; populated inside message()
        out = self.propagate(edge_index, query=query, key=key, value=value, edge_attr=edge_attr)

        if self.concat:
            out = out.view(-1, H * C)
        else:
            out = out.mean(dim=1)

        # Residual/skip connection from the raw input features.
        if self.root_weight:
            out = out + self.lin_skip(x)

        return out

    def message(
        self,
        query_i: Tensor,
        key_j: Tensor,
        value_j: Tensor,
        edge_attr: Optional[Tensor],
        index: Tensor,
        ptr: Optional[Tensor] = None,
    ) -> Tensor:
        """Compute attention-weighted messages along each edge.

        Bug fix: ``edge_attr`` was previously passed to ``propagate()`` but
        never declared here, so ``self.lin_edge`` was dead code and edge
        features were silently ignored. They are now projected and added to
        the keys and values, matching PyG's TransformerConv semantics.
        """
        if self.lin_edge is not None and edge_attr is not None:
            edge_attr = self.lin_edge(edge_attr).view(-1, self.heads, self.out_channels)
            key_j = key_j + edge_attr
        else:
            edge_attr = None

        # Scaled dot-product attention score per edge and head.
        alpha = (query_i * key_j).sum(dim=-1) / math.sqrt(self.out_channels)
        alpha = softmax(alpha, index, ptr)
        self.alpha = alpha  # keep pre-dropout weights for visualization
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)

        out = value_j
        if edge_attr is not None:
            out = out + edge_attr
        return out * alpha.view(-1, self.heads, 1)

# 🔴 可视化注意力分数并保存为图像
# Visualize attention scores and save them as an image.
def visualize_attention(alpha, node_labels, edge_index, save_path="attention_weights.png"):
    """Visualize attention scores, highlighting traffic-node edges, and save to file.

    Edges touching a node whose label contains "traffic" keep their weights;
    all other edges are zeroed out so traffic edges stand out in the heatmap.

    :param alpha: attention weights (Tensor) of shape [num_edges, heads]
        (a 1-D tensor of shape [num_edges] is also accepted)
    :param node_labels: node labels (list of str), length num_nodes
    :param edge_index: edge indices (Tensor) of shape [2, num_edges]
    :param save_path: output path (str), defaults to "attention_weights.png"
    """
    alpha_np = alpha.detach().cpu().numpy()
    # Normalize to 2-D [num_edges, heads] so single-head input still works.
    if alpha_np.ndim == 1:
        alpha_np = alpha_np.reshape(-1, 1)
    num_edges, num_heads = alpha_np.shape

    # Mark each edge whose source OR destination is a "traffic" node.
    src_nodes, dst_nodes = edge_index
    traffic_mask = np.array([
        "traffic" in str(node_labels[src]) or "traffic" in str(node_labels[dst])
        for src, dst in zip(src_nodes.cpu().numpy(), dst_nodes.cpu().numpy())
    ])

    # Zero out non-traffic edges; broadcasting keeps the per-head columns.
    traffic_alpha = alpha_np * traffic_mask.reshape(-1, 1)

    # Bug fix: the previous reshape(-1, 1) flattened edges*heads into a single
    # column, mismatching the num_edges y-tick labels whenever heads > 1.
    # Plot one column per head instead, with one row per edge.
    plt.figure(figsize=(12, 10))
    sns.heatmap(
        traffic_alpha,
        annot=True,
        fmt=".2f",
        cmap="Blues",
        xticklabels=[f"Head {h}" for h in range(num_heads)],
        yticklabels=[f"Edge {i}" for i in range(num_edges)],
    )
    plt.title("Attention Weights (Alpha) - Traffic Nodes Focused")
    plt.ylabel("Edges")
    plt.xlabel("Attention Score")

    # Save the figure and close it to release memory.
    plt.savefig(save_path)
    plt.close()
