import torch
import torch.nn.functional as F
import dgl
import dgl.function as fn
import functools
import pdb
import os

import torch.nn as nn

from config import device, if_split
from einops import rearrange

# Restrict CUDA to the device id configured in config.py.
# NOTE(review): assumes `device` is a GPU-id string such as "0" — confirm in config.py.
os.environ["CUDA_VISIBLE_DEVICES"] = device

if if_split:
    # Cap the CUDA caching allocator's split size to reduce fragmentation OOMs.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:1024"

class MLP(torch.nn.Module):
    """Fully-connected stack: Linear layers joined by LeakyReLU(0.2).

    Optional Dropout(0.2) and BatchNorm1d follow each hidden activation;
    the final Linear layer has no activation or regularization after it.

    Args:
        *sizes: layer widths, e.g. MLP(in, h1, h2, out).
        batchnorm: insert BatchNorm1d after each hidden activation.
        dropout: insert Dropout(p=0.2) after each hidden activation.
    """

    def __init__(self, *sizes, batchnorm=False, dropout=False):
        super().__init__()
        pairs = list(zip(sizes[:-1], sizes[1:]))
        modules = []
        for idx, (fan_in, fan_out) in enumerate(pairs):
            modules.append(torch.nn.Linear(fan_in, fan_out))
            # Hidden layers only: activation (+ optional regularization).
            if idx < len(pairs) - 1:
                modules.append(torch.nn.LeakyReLU(negative_slope=0.2))
                if dropout:
                    modules.append(torch.nn.Dropout(p=0.2))
                if batchnorm:
                    modules.append(torch.nn.BatchNorm1d(fan_out))
        self.layers = torch.nn.Sequential(*modules)

    def forward(self, x):
        """Apply the stack to x of shape (batch, sizes[0])."""
        return self.layers(x)
    
class CNNModel(nn.Module):
    """Small CNN encoder: two conv+pool stages, two channel-compression
    convs, then a fully-connected projection to ``output_dim``.

    Expects single-channel 128x128 input: the two 2x2 max-pools shrink the
    spatial size to 32x32, matching the ``1 * 32 * 32`` input of ``fc1``.
    """

    def __init__(self, input_channels=1, output_dim=16):
        super().__init__()
        # Stage 1: widen to 32 channels, 3x3 kernel with padding keeps the
        # spatial size; 2x2 max-pool halves it.
        self.conv1 = nn.Conv2d(input_channels, 32, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)

        # Stage 2: widen to 64 channels, halve spatial size again.
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.pool2 = nn.MaxPool2d(2, 2)

        # Compress channels back down (64 -> 32 -> 1) with no more pooling.
        self.conv3 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(32, 1, kernel_size=3, stride=1, padding=1)

        # Project the flattened 32x32 single-channel map to the embedding.
        self.fc1 = nn.Linear(1 * 32 * 32, output_dim)

    def forward(self, x):
        # Two downsampling stages: conv then 2x2 pool.
        for conv, pool in ((self.conv1, self.pool1), (self.conv2, self.pool2)):
            x = pool(conv(x))

        # Channel compression without further downsampling.
        x = self.conv4(self.conv3(x))

        # Flatten everything but the batch dimension, then project + ReLU.
        x = torch.flatten(x, 1)
        return F.relu(self.fc1(x))


class CrossAttention(nn.Module):
    """Bidirectional cross-attention between two sequences.

    Both inputs are projected into a shared hidden space; the affinity
    matrix between the projections is softmax-normalized in each direction
    and used to re-express every position of one sequence as a convex
    mixture of the *other* sequence's original (unprojected) features.
    """

    def __init__(self, input_dim_a, input_dim_b, hidden_dim):
        super(CrossAttention, self).__init__()

        self.linear_a = nn.Linear(input_dim_a, hidden_dim)
        self.linear_b = nn.Linear(input_dim_b, hidden_dim)

    def forward(self, input_a, input_b):
        """input_a: (B, La, Da), input_b: (B, Lb, Db).

        Returns:
            output_a: (B, La, Db) — each a-position as a mixture of b rows.
            output_b: (B, Lb, Da) — each b-position as a mixture of a rows.
        """
        # Project both sequences into the shared hidden space.
        mapped_a = self.linear_a(input_a)  # (B, La, hidden_dim)
        mapped_b = self.linear_b(input_b)  # (B, Lb, hidden_dim)

        # Unnormalized affinity between every a-position and b-position.
        scores = torch.matmul(mapped_a, mapped_b.transpose(1, 2))  # (B, La, Lb)
        # Normalize over b-positions for a->b attention ...
        attentions_a = torch.softmax(scores, dim=-1)  # (B, La, Lb)
        # ... and over a-positions for b->a attention.
        attentions_b = torch.softmax(scores.transpose(1, 2), dim=-1)  # (B, Lb, La)

        # BUGFIX: the two attention matrices were swapped, which raised a
        # shape error whenever La != Lb. Each output must use the attention
        # normalized over the *other* sequence's positions.
        output_a = torch.matmul(attentions_a, input_b)  # (B, La, Db)
        output_b = torch.matmul(attentions_b, input_a)  # (B, Lb, Da)

        return output_a, output_b

class NetConv(torch.nn.Module):
    """Message-passing layer over a DGL heterograph with 'net_out' and
    'net_in' edge types.

    'net_out' edges produce messages that are summed directly into the
    destination nodes' new features; 'net_in' edges produce gated messages
    that are sum- and max-reduced separately and combined with the node's
    own features on the nodes listed in ts['output_nodes'].

    Args:
        in_nf: input node-feature width.
        in_ef: input edge-feature width.
        out_nf: output node-feature width.
        h1: width of the sum-reduced message half.
        h2: width of the max-reduced message half.
    """
    def __init__(self, in_nf, in_ef, out_nf, h1=32, h2=32):
        super().__init__()
        self.in_nf = in_nf
        self.in_ef = in_ef
        self.out_nf = out_nf
        self.h1 = h1
        self.h2 = h2
        
        # Emits a 1-wide sigmoid gate plus the h1/h2 message halves.
        self.MLP_msg_i2o = MLP(self.in_nf * 2 + self.in_ef, 64, 64, 64, 1 + self.h1 + self.h2)
        # Combines [own nf, sum-reduced, max-reduced] into out_nf features.
        self.MLP_reduce_o = MLP(self.in_nf + self.h1 + self.h2, 64, 64, 64, self.out_nf)
        # Per-edge message along 'net_out'; reduced by sum into 'new_nf'.
        self.MLP_msg_o2i = MLP(self.in_nf * 2 + self.in_ef, 64, 64, 64, 64, self.out_nf)

    def edge_msg_i(self, edges):
        """Per-edge message for 'net_out': MLP over [src nf, dst nf, edge ef]."""
        x = torch.cat([edges.src['nf'], edges.dst['nf'], edges.data['ef']], dim=1)
        x = self.MLP_msg_o2i(x)
        return {'efi': x}

    def edge_msg_o(self, edges):
        """Per-edge gated message for 'net_in': one sigmoid gate scales both halves."""
        x = torch.cat([edges.src['nf'], edges.dst['nf'], edges.data['ef']], dim=1)
        x = self.MLP_msg_i2o(x)
        k, f1, f2 = torch.split(x, [1, self.h1, self.h2], dim=1)
        k = torch.sigmoid(k)
        return {'efo1': f1 * k, 'efo2': f2 * k}

    def node_reduce_o(self, nodes):
        """Combine a node's own features with its sum/max-reduced messages."""
        x = torch.cat([nodes.data['nf'], nodes.data['nfo1'], nodes.data['nfo2']], dim=1)
        x = self.MLP_reduce_o(x)
        return {'new_nf': x}
        
    def forward(self, g, ts, nf):
        """g: DGL heterograph; ts: dict providing 'output_nodes' ids; nf: (N, in_nf).

        Returns the (N, out_nf) tensor of updated node features.
        """
        with g.local_scope():
            g.ndata['nf'] = nf
            # input nodes: summed 'net_out' messages become 'new_nf' directly.
            g.update_all(self.edge_msg_i, fn.sum('efi', 'new_nf'), etype='net_out')
            # output nodes: gated 'net_in' messages, reduced by sum and by max,
            # then combined per node — this overwrites 'new_nf' on output_nodes.
            g.apply_edges(self.edge_msg_o, etype='net_in')
            g.update_all(fn.copy_e('efo1', 'efo1'), fn.sum('efo1', 'nfo1'), etype='net_in')
            g.update_all(fn.copy_e('efo2', 'efo2'), fn.max('efo2', 'nfo2'), etype='net_in')
            g.apply_nodes(self.node_reduce_o, ts['output_nodes'])
            
            return g.ndata['new_nf']
        
class NetConv_GAT(torch.nn.Module):
    """NetConv variant that adds per-feature sigmoid attention gates and an
    optional virtual node connected to every real node.

    Message flow matches NetConv: 'net_out' edges are summed into 'new_nf';
    'net_in' edges produce gated sum/max-reduced messages combined on the
    nodes in ts['output_nodes'].
    """
    def __init__(self, in_nf, in_ef, out_nf, h1=32, h2=32, use_virtual_node=True):
        super().__init__()
        self.in_nf = in_nf
        self.in_ef = in_ef
        self.out_nf = out_nf
        self.h1 = h1
        self.h2 = h2
        self.use_virtual_node = use_virtual_node

        # Scalar attention scores (sigmoid applied in apply_attention /
        # apply_nattention) used to reweight features before the MLPs.
        self.attn_src = nn.Linear(in_nf, 1)
        self.attn_dst = nn.Linear(in_nf, 1)
        self.attn_edge = nn.Linear(in_ef, 1)
        self.attn_h1 = nn.Linear(h1, 1)
        self.attn_h2 = nn.Linear(h2, 1)

        # MLP modules (same widths as NetConv's).
        self.MLP_msg_i2o = MLP(self.in_nf * 2 + self.in_ef, 64, 64, 64, 1 + self.h1 + self.h2)
        self.MLP_reduce_o = MLP(self.in_nf + self.h1 + self.h2, 64, 64, 64, self.out_nf)
        self.MLP_msg_o2i = MLP(self.in_nf * 2 + self.in_ef, 64, 64, 64, 64, self.out_nf)

        # Transform applied to the virtual node's output features before
        # broadcasting them to all real nodes.
        if self.use_virtual_node:
            self.virtual_node_fc = nn.Linear(out_nf, out_nf)  # virtual-node feature transform

    def apply_attention(self, src, dst, edge_feat):
        """Scale src/dst/edge features by per-row sigmoid gates in [0, 1]."""
        alpha_src = torch.sigmoid(self.attn_src(src))
        alpha_dst = torch.sigmoid(self.attn_dst(dst))
        alpha_edge = torch.sigmoid(self.attn_edge(edge_feat))
        weighted_src = alpha_src * src
        weighted_dst = alpha_dst * dst
        weighted_edge = alpha_edge * edge_feat
        return weighted_src, weighted_dst, weighted_edge

    def apply_nattention(self, nf, nfo1, nfo2):
        """Scale a node's own features and its two reduced messages by sigmoid gates."""
        alpha_nf = torch.sigmoid(self.attn_src(nf))
        alpha_nfo1 = torch.sigmoid(self.attn_h1(nfo1))
        alpha_nfo2 = torch.sigmoid(self.attn_h2(nfo2))
        weighted_nf = alpha_nf * nf
        weighted_nfo1 = alpha_nfo1 * nfo1
        weighted_nfo2 = alpha_nfo2 * nfo2
        return weighted_nf, weighted_nfo1, weighted_nfo2

    def edge_msg_i(self, edges):
        """Per-edge message for 'net_out', with attention-gated inputs."""
        weighted_src, weighted_dst, weighted_edge = self.apply_attention(
            edges.src['nf'], edges.dst['nf'], edges.data['ef']
        )
        x = torch.cat([weighted_src, weighted_dst, weighted_edge], dim=1)
        x = self.MLP_msg_o2i(x)
        return {'efi': x}

    def edge_msg_o(self, edges):
        """Per-edge gated message for 'net_in'; one sigmoid gate scales both halves."""
        weighted_src, weighted_dst, weighted_edge = self.apply_attention(
            edges.src['nf'], edges.dst['nf'], edges.data['ef']
        )
        x = torch.cat([weighted_src, weighted_dst, weighted_edge], dim=1)
        x = self.MLP_msg_i2o(x)
        k, f1, f2 = torch.split(x, [1, self.h1, self.h2], dim=1)
        k = torch.sigmoid(k)
        return {'efo1': f1 * k, 'efo2': f2 * k}

    def node_reduce_o(self, nodes):
        """Combine a node's gated own features with its gated sum/max messages."""
        weighted_src, weighted_dst, weighted_edge = self.apply_nattention(
            nodes.data['nf'], nodes.data['nfo1'], nodes.data['nfo2']
        )
        x = torch.cat([weighted_src, weighted_dst, weighted_edge], dim=1)
        x = self.MLP_reduce_o(x)
        return {'new_nf': x}

    def forward(self, g, ts, nf):
        """g: DGL heterograph; ts: dict providing 'output_nodes'; nf: (N, in_nf)."""
        with g.local_scope():
            if self.use_virtual_node:
                # Add a virtual node connected to/from every real node.
                # NOTE(review): dgl's local_scope only reverts *feature* changes;
                # add_nodes/add_edges mutate the graph structure permanently, so
                # each forward call appears to grow g — verify against DGL docs.
                # NOTE(review): add_nodes/add_edges without ntype/etype raise on
                # graphs with multiple node/edge types ('net_out'/'net_in' are
                # used below) — confirm g's schema allows this.
                num_nodes = g.number_of_nodes()
                virtual_node_id = num_nodes
                g.add_nodes(1)
                
                # Build the edge endpoints on the same device as the graph.
                device = g.device  # device that g's tensors live on
                src = torch.arange(num_nodes, device=device)  # all real-node ids
                dst = torch.full((num_nodes,), virtual_node_id, device=device)  # virtual-node id repeated
                
                g.add_edges(src, dst)
                g.add_edges(dst, src)

                # Initialize the virtual node's features as the mean of all node features.
                virtual_node_feat = nf.mean(dim=0, keepdim=True)
                nf = torch.cat([nf, virtual_node_feat], dim=0)

            g.ndata['nf'] = nf
            # input nodes: summed 'net_out' messages become 'new_nf'.
            g.update_all(self.edge_msg_i, fn.sum('efi', 'new_nf'), etype='net_out')
            # output nodes: gated 'net_in' messages, sum- and max-reduced.
            g.apply_edges(self.edge_msg_o, etype='net_in')
            g.update_all(fn.copy_e('efo1', 'efo1'), fn.sum('efo1', 'nfo1'), etype='net_in')
            g.update_all(fn.copy_e('efo2', 'efo2'), fn.max('efo2', 'nfo2'), etype='net_in')
            g.apply_nodes(self.node_reduce_o, ts['output_nodes'])

            if self.use_virtual_node:
                # Broadcast the (transformed) virtual-node feature to all nodes.
                # NOTE(review): this assigns a tensor with one fewer row than the
                # mutated graph has nodes — DGL requires ndata's first dim to equal
                # the node count, so this likely raises; verify before relying on it.
                virtual_node_feat = g.ndata['new_nf'][-1:]  # assumes the virtual node is last
                virtual_node_feat = self.virtual_node_fc(virtual_node_feat)
                g.ndata['new_nf'] = g.ndata['new_nf'][:-1] + virtual_node_feat

            return g.ndata['new_nf']
            
class TimingGCN(torch.nn.Module):
    """Three stacked NetConv layers; the first 4 output channels are net delays."""

    def __init__(self):
        super().__init__()
        self.nc1 = NetConv(10, 2, 32)
        self.nc2 = NetConv(32, 2, 32)
        # 16 = 4x delay + 12x arbitrary (might include cap, beta)
        self.nc3 = NetConv(32, 2, 16)

    def forward(self, g, ts, groundtruth=False):
        # `groundtruth` is kept for interface compatibility; it is not used here.
        x = g.ndata['nf']
        for conv in (self.nc1, self.nc2, self.nc3):
            x = conv(g, ts, x)
        # First four channels of the final embedding are the net delays.
        return x[:, :4]
    
class TimingGat(torch.nn.Module):
    """Three stacked NetConv_GAT layers (with virtual nodes); first 4 output
    channels are net delays."""

    def __init__(self):
        super().__init__()
        self.nc1 = NetConv_GAT(10, 2, 32, use_virtual_node=True)
        self.nc2 = NetConv_GAT(32, 2, 32, use_virtual_node=True)
        # 16 = 4x delay + 12x arbitrary
        self.nc3 = NetConv_GAT(32, 2, 16, use_virtual_node=True)

    def forward(self, g, ts, groundtruth=False):
        # `groundtruth` is kept for interface compatibility; it is not used here.
        x = g.ndata['nf']
        for conv in (self.nc1, self.nc2, self.nc3):
            x = conv(g, ts, x)
        # First four channels of the final embedding are the net delays.
        return x[:, :4]
    
# class TimingGat(torch.nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.nc1 = NetConv_GAT(10, 2, 32)
#         self.nc2 = NetConv_GAT(32, 2, 32)
#         self.nc3 = NetConv_GAT(32, 2, 16)  # 16 = 4x delay + 12x arbitrary (might include cap, beta)
#         self.predict_mlp = MLP(16+10+10, 64, 64, 64, 16)
#         self.linear1 = nn.Linear(20, 32)
#         self.linear2 = nn.Linear(20, 32)
#         self.linear3 = nn.Linear(10, 16)
#         self.mlp3 = MLP(20,32,32,16)
#         self.cross_attention = nn.MultiheadAttention(embed_dim=16, num_heads=1, batch_first=True)
#         self.gate_mlp1 = MLP(64,32,1)
#         self.gate_mlp2 = MLP(64,32,2)
#         self.gate_mlp3 = MLP(32,16,1)
#         self.predict_mlp = MLP(16,64,32,16)

#     def forward(self, g, ts, groundtruth=False):
#         nf0 = g.ndata['nf']

#         # 计算全局池化特征 (Mean Pooling + Max Pooling)
#         # mean_pooled = torch.mean(nf0, dim=0)  # (feature_dim,)
#         # max_pooled, _ = torch.max(nf0, dim=0)  # (feature_dim,)
#         # global_pool_feature = torch.cat([mean_pooled, max_pooled], dim=-1)  # (20,)

#         global_pool_feature = torch.add(nf0, dim=0)

#         # **Step 1: 先用 GAT 提取特征**
#         x = self.nc1(g, ts, nf0)
#         x = self.nc2(g, ts, x)
#         x = self.nc3(g, ts, x)  # 输出 x (num_nodes, 16)

#         # **Step 2: 计算不同节点的全局特征**
#         global_proj3 = self.linear3(global_pool_feature)  
#         global_proj3 = global_pool_feature.unsqueeze(0).expand(x.shape[0], -1)  

#         query = x.unsqueeze(1)  
#         key = global_proj3.unsqueeze(1) 
#         value = global_proj3.unsqueeze(1)  

#         attn_output, _ = self.cross_attention(query, key, value) 
#         attn_output = attn_output.squeeze(1) 

#         x = x + attn_output  

#         x = self.predict_mlp(x)

#         net_delays = x[:, :4]  
#         return net_delays

