import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, GATConv, TransformerConv, global_mean_pool, global_max_pool, global_add_pool
from torch_geometric.nn.norm import GraphNorm, LayerNorm
import math
import numpy as np

class MultiHeadGraphAttention(nn.Module):
    """Multi-head graph attention layer with optional edge features.

    Wraps a TransformerConv (attention-based message passing, heads averaged
    rather than concatenated) followed by GraphNorm, an identity residual
    when input/output widths match, and dropout.
    """

    def __init__(self, in_dim, out_dim, num_heads=8, dropout=0.1, edge_dim=None):
        super().__init__()
        self.num_heads = num_heads
        self.out_dim = out_dim
        self.head_dim = out_dim // num_heads

        assert out_dim % num_heads == 0, "out_dim must be divisible by num_heads"

        # TransformerConv instead of a plain GCN: supports edge features
        # and attention. concat=False averages the heads so the output
        # width stays out_dim.
        self.conv = TransformerConv(
            in_channels=in_dim,
            out_channels=out_dim,
            heads=num_heads,
            dropout=dropout,
            edge_dim=edge_dim,
            concat=False,
        )

        self.norm = GraphNorm(out_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, edge_index, edge_attr=None, batch=None):
        """Attention conv -> GraphNorm -> optional residual -> dropout."""
        out = self.norm(self.conv(x, edge_index, edge_attr), batch)
        # Residual connection only when the widths line up.
        if out.size(-1) == x.size(-1):
            out = out + x
        return self.dropout(out)

class HierarchicalGraphPooling(nn.Module):
    """Graph readout blending mean/max/sum pooling with learned weights.

    A small MLP scores the concatenated pooled vectors to produce per-graph
    mixing weights (softmax over the three strategies); a sigmoid gate then
    rescales the blended representation.
    """

    def __init__(self, hidden_dim):
        super().__init__()
        self.hidden_dim = hidden_dim

        # MLP producing softmax mixing weights over the three poolings.
        self.pool_weights = nn.Sequential(
            nn.Linear(hidden_dim * 3, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 3),
            nn.Softmax(dim=-1),
        )

        # Scalar gate in (0, 1) applied to the blended embedding.
        self.adaptive_pool = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 1),
            nn.Sigmoid(),
        )

    def forward(self, x, batch):
        """Pool node features `x` per graph id in `batch` -> [B, hidden_dim]."""
        pools = [
            global_mean_pool(x, batch),   # [B, hidden_dim]
            global_max_pool(x, batch),    # [B, hidden_dim]
            global_add_pool(x, batch),    # [B, hidden_dim]
        ]

        # Per-graph mixing weights, one column per pooling strategy.
        mix = self.pool_weights(torch.cat(pools, dim=-1))  # [B, 3]

        # Weighted blend of the three pooled vectors (mean, max, sum order).
        blended = sum(w.unsqueeze(-1) * p for w, p in zip(mix.unbind(dim=-1), pools))

        # Adaptive gating of the final graph embedding.
        return blended * self.adaptive_pool(blended)

class TemporalAwareGNN(nn.Module):
    """Temporal-aware graph neural network.

    Encodes a dependency graph with stacked multi-head graph-attention
    layers, pools node embeddings into a single graph embedding, and fuses
    that embedding with an encoding of two scalar temporal features.
    """
    def __init__(self, node_dim, edge_dim, hidden_dim, num_layers=3, num_heads=8, dropout=0.1):
        super().__init__()
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim

        # Input projections; edge projection only exists when edge features
        # are expected (edge_dim > 0).
        self.node_proj = nn.Linear(node_dim, hidden_dim)
        self.edge_proj = nn.Linear(edge_dim, hidden_dim) if edge_dim > 0 else None

        # Stack of multi-head graph-attention layers (all hidden_dim wide).
        self.gnn_layers = nn.ModuleList()
        for i in range(num_layers):
            self.gnn_layers.append(
                MultiHeadGraphAttention(
                    in_dim=hidden_dim,
                    out_dim=hidden_dim,
                    num_heads=num_heads,
                    dropout=dropout,
                    edge_dim=hidden_dim if edge_dim > 0 else None
                )
            )

        # Hierarchical readout (learned mix of mean/max/sum pooling).
        self.pooling = HierarchicalGraphPooling(hidden_dim)

        # Encoder for the 2-dim temporal vector.
        self.temporal_encoding = nn.Sequential(
            nn.Linear(2, hidden_dim // 4),  # [remaining_time, active_ratio]
            nn.ReLU(),
            nn.Linear(hidden_dim // 4, hidden_dim)
        )

        # Fuses the pooled graph embedding with the temporal embedding.
        self.fusion = nn.Sequential(
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, hidden_dim)
        )

    def forward(self, data, temporal_info=None):
        """
        data: PyG Data object with x, edge_index, edge_attr, batch.
        temporal_info: [remaining_time_norm, active_ratio] temporal vector,
            or None to use a zero temporal embedding.
        Returns: [hidden_dim] for a single graph, [B, hidden_dim] otherwise.
        """
        x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch

        # Project node features into the hidden width.
        h = self.node_proj(x)

        # Project edge features (only when both edges and projection exist).
        # NOTE(review): if edge_attr is present but edge_proj is None, raw
        # edge_attr is forwarded to layers built with edge_dim=None — confirm
        # callers never hit that combination.
        if edge_attr is not None and self.edge_proj is not None:
            edge_attr = self.edge_proj(edge_attr)

        # Multi-layer GNN propagation with layer-wise residuals (skipped on
        # the first layer since there is no prior hidden state to add).
        for i, gnn_layer in enumerate(self.gnn_layers):
            h_new = gnn_layer(h, edge_index, edge_attr, batch)
            # Layer-wise residual connection
            if i > 0:
                h = h + h_new
            else:
                h = h_new

        # Hierarchical pooling -> per-graph embedding.
        graph_emb = self.pooling(h, batch)

        # Encode temporal information (zeros when none is supplied).
        if temporal_info is not None:
            temporal_emb = self.temporal_encoding(temporal_info)
            # Align both embeddings to 2-D [B, hidden_dim] before fusing.
            if temporal_emb.dim() == 1:
                temporal_emb = temporal_emb.unsqueeze(0)
            if graph_emb.dim() == 1:
                graph_emb = graph_emb.unsqueeze(0)
        else:
            temporal_emb = torch.zeros_like(graph_emb)

        # Fuse graph embedding with temporal embedding.
        combined = torch.cat([graph_emb, temporal_emb], dim=-1)
        final_emb = self.fusion(combined)

        # Single-graph input -> return a 1-D vector.
        if final_emb.size(0) == 1:
            final_emb = final_emb.squeeze(0)

        return final_emb

class CrossAttentionQNetwork(nn.Module):
    """Cross-attention Q-network scoring actions against a graph embedding.

    Inference mode (1-D graph embedding): the K candidate actions first
    interact via self-attention, then each action queries the graph context
    via cross-attention, and a per-action Q-value is produced.
    Batch mode (2-D inputs): one (graph, action) pair per row; features are
    fused additively without the attention stages.
    """
    def __init__(self, graph_hid_dim, agent_feat_dim, q_hid_dim, num_heads=8, dropout=0.1):
        super().__init__()
        self.graph_hid_dim = graph_hid_dim
        self.agent_feat_dim = agent_feat_dim
        self.num_heads = num_heads
        self.head_dim = q_hid_dim // num_heads

        # Multi-head cross-attention (actions query the graph context).
        self.cross_attention = nn.MultiheadAttention(
            embed_dim=q_hid_dim,
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True
        )

        # Projections into the shared q_hid_dim space.
        self.graph_proj = nn.Linear(graph_hid_dim, q_hid_dim)
        self.action_proj = nn.Linear(agent_feat_dim, q_hid_dim)

        # Positional encoding over the action sequence.
        self.pos_encoding = PositionalEncoding(q_hid_dim, max_len=1000)

        # Self-attention for interactions between candidate actions.
        self.self_attention = nn.MultiheadAttention(
            embed_dim=q_hid_dim,
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True
        )

        # Position-wise feed-forward network.
        self.ffn = nn.Sequential(
            nn.Linear(q_hid_dim, q_hid_dim * 2),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(q_hid_dim * 2, q_hid_dim)
        )

        # Layer norms for the three residual stages.
        self.norm1 = nn.LayerNorm(q_hid_dim)
        self.norm2 = nn.LayerNorm(q_hid_dim)
        self.norm3 = nn.LayerNorm(q_hid_dim)

        # Final head mapping fused features to a scalar Q-value.
        self.output_proj = nn.Sequential(
            nn.Linear(q_hid_dim, q_hid_dim // 2),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(q_hid_dim // 2, 1)
        )

        self.dropout = nn.Dropout(dropout)

    def forward(self, graph_emb, action_feats):
        """
        graph_emb: [graph_hid_dim] (single graph) or [B, graph_hid_dim]
        action_feats: [K, agent_feat_dim] (K candidates) or [B, agent_feat_dim]
        Returns: Q-values of shape [K] or [B].
        """
        if graph_emb.dim() == 1:
            # Inference mode: one graph scored against K candidate actions.
            K = action_feats.size(0)
            graph_emb = graph_emb.unsqueeze(0).expand(K, -1)  # [K, graph_hid_dim]
            batch_mode = False
        else:
            # Training mode: row-aligned (graph, action) pairs.
            batch_mode = True

        # Project both inputs into the shared hidden space.
        graph_feat = self.graph_proj(graph_emb)       # [K, q_hid_dim] or [B, q_hid_dim]
        action_feat = self.action_proj(action_feats)  # [K, q_hid_dim] or [B, q_hid_dim]

        if not batch_mode:
            # Positional encoding over the K-action sequence.
            action_feat = self.pos_encoding(action_feat.unsqueeze(0)).squeeze(0)  # [K, q_hid_dim]

            # Self-attention: let candidate actions interact.
            action_seq = action_feat.unsqueeze(0)  # [1, K, q_hid_dim]
            action_attn, _ = self.self_attention(action_seq, action_seq, action_seq)
            action_feat = self.norm1(action_feat + action_attn.squeeze(0))

            # Cross-attention: each ACTION queries the graph context.
            # FIX: the previous version used K identical copies of the graph
            # embedding as queries with actions as key/value; identical
            # queries yield identical attention outputs, and the residual
            # contained only graph_feat, so every action received the same
            # Q-value. Querying from the action side (graph as a single
            # key/value token, residual on the action path) keeps Q-values
            # action-dependent while still injecting graph information.
            graph_ctx = graph_feat[:1].unsqueeze(0)  # [1, 1, q_hid_dim] (rows are identical)
            cross_attn, attn_weights = self.cross_attention(
                action_feat.unsqueeze(0), graph_ctx, graph_ctx
            )
            combined_feat = self.norm2(action_feat + cross_attn.squeeze(0))
        else:
            # Batch mode: additive fusion, no attention stages.
            # NOTE(review): train/inference paths are structurally different
            # (pos-encoding and attention are skipped here) — confirm this
            # asymmetry is intended by the training setup.
            combined_feat = self.norm1(graph_feat + action_feat)

        # Feed-forward with residual.
        ffn_out = self.ffn(combined_feat)
        final_feat = self.norm3(combined_feat + self.dropout(ffn_out))

        # Scalar Q-value per row.
        return self.output_proj(final_feat).squeeze(-1)

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (as in "Attention Is All You Need").

    Precomputes a [max_len, d_model] table: even columns get sin, odd
    columns get cos at geometrically-spaced frequencies. `forward` adds the
    first seq_len rows to the input.
    """
    def __init__(self, d_model, max_len=1000):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Per-even-dimension frequency scale: 10000^(-2i / d_model).
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        # FIX: for odd d_model there is one fewer cos column than sin column,
        # so trim div_term to d_model // 2 entries (a no-op for even d_model,
        # where the original assignment would otherwise raise a shape error).
        pe[:, 1::2] = torch.cos(position * div_term[: d_model // 2])
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to x of shape [batch, seq_len, d_model]."""
        return x + self.pe[:x.size(1), :].unsqueeze(0)

class AdvancedVersionedDependencyGraph:
    """Builds enriched PyG graphs from versioned dependency data.

    Tensors (all placed on `device`):
      dep  [N, N, V]: dep[i, j, v] is the version of component j required by
                      component i when i is at version v; negative values
                      mean "no dependency".
      jump [N, V, V]: jump[i, a, b] > 0 iff component i can jump a -> b.
      ut   [N, V, V]: upgrade-time cost for component i going a -> b.
    """
    def __init__(self, dependency_matrix, jump_matrix, upgrade_time, device):
        self.dep = torch.tensor(dependency_matrix, dtype=torch.long, device=device)
        self.jump = torch.tensor(jump_matrix, dtype=torch.float, device=device)
        self.ut = torch.tensor(upgrade_time, dtype=torch.float, device=device)
        self.N, _, self.V = self.dep.shape
        self.device = device

        # Statistics reused as node features by every build_graph call.
        self._precompute_graph_stats()

    def _precompute_graph_stats(self):
        """Precompute per-component statistics used for feature engineering.

        Vectorized replacement for the original O(N^2) and O(N*V) Python
        loops; results are numerically identical.
        """
        # dep_counts[j, i] = number of versions at which component j declares
        # a dependency on component i.
        dep_counts = (self.dep >= 0).sum(dim=2).float()  # [N, N]
        # Dependency complexity of i = how often i is depended upon by
        # OTHER components (the j == i diagonal is excluded).
        self.dependency_complexity = dep_counts.sum(dim=0) - dep_counts.diagonal()  # [N]

        # avg_upgrade_paths[i, v] = number of versions reachable from v.
        self.avg_upgrade_paths = (self.jump > 0).sum(dim=2).float()  # [N, V]

    def build_graph(self, cur_versions, remaining_time=None, active_upgrades=None):
        """Build an enriched graph representation of the current state.

        Args:
            cur_versions: [N] tensor of each component's current version.
            remaining_time: accepted for interface compatibility; unused here.
            active_upgrades: optional set of component ids currently upgrading.

        Returns:
            Data with x [N, 2V+7], edge_index [2, E] (src depends on dst),
            edge_attr [E, 3], batch [N] (all zeros: single graph).
        """
        rows = torch.arange(self.N, device=self.device)

        # ---- node features ------------------------------------------------
        # Current-version one-hot.
        v_oh = F.one_hot(cur_versions.long(), num_classes=self.V).float()  # [N, V]

        # Jump capability out of each component's current version.
        jm = self.jump[rows, cur_versions]  # [N, V]

        # Upgrade-time statistics from the current version.
        ut_from_cur = self.ut[rows, cur_versions]  # [N, V]
        avg_ut = ut_from_cur.mean(dim=1, keepdim=True)     # [N, 1]
        min_ut = ut_from_cur.min(dim=1, keepdim=True)[0]   # [N, 1]
        max_ut = ut_from_cur.max(dim=1, keepdim=True)[0]   # [N, 1]

        # Precomputed structural statistics.
        dep_complexity = self.dependency_complexity.unsqueeze(1)              # [N, 1]
        upgrade_paths = self.avg_upgrade_paths[rows, cur_versions].unsqueeze(1)  # [N, 1]

        # Normalized progress through the version range.
        progress = cur_versions.float().unsqueeze(1) / (self.V - 1)  # [N, 1]

        # 1.0 for components currently being upgraded, else 0.0.
        if active_upgrades is not None:
            upgrading_status = torch.tensor(
                [1.0 if i in active_upgrades else 0.0 for i in range(self.N)],
                device=self.device).unsqueeze(1)  # [N, 1]
        else:
            upgrading_status = torch.zeros(self.N, 1, device=self.device)

        x = torch.cat([
            v_oh,                    # version one-hot          [N, V]
            jm,                      # jump capability          [N, V]
            avg_ut, min_ut, max_ut,  # upgrade-time stats       [N, 3]
            dep_complexity,          # dependency complexity    [N, 1]
            upgrade_paths,           # reachable-version count  [N, 1]
            progress,                # version progress         [N, 1]
            upgrading_status,        # upgrading flag           [N, 1]
        ], dim=1)  # [N, 2V+7]

        # ---- edges and edge features -------------------------------------
        src, dst, edge_features = [], [], []
        for i in range(self.N):
            vi = cur_versions[i]
            deps = self.dep[i, :, vi]  # required version of every j, or < 0
            js = torch.nonzero(deps >= 0, as_tuple=False).view(-1)
            if js.size(0) == 0:
                continue

            src.append(torch.full((js.size(0),), i, dtype=torch.long, device=self.device))
            dst.append(js)

            # Edge features: [required version, urgency, upgrade cost].
            required_versions = deps[js].float().unsqueeze(1)  # [E_i, 1]

            # Urgency: gap between the required and current version of j.
            current_versions_j = cur_versions[js].float().unsqueeze(1)
            urgency = torch.clamp(required_versions - current_versions_j, min=0.0)  # [E_i, 1]

            # Cheapest single jump from j's current version to any version
            # satisfying the requirement; 0.0 if already satisfied or no
            # jump exists.
            upgrade_costs = []
            for idx, j in enumerate(js):
                cur_v_j = cur_versions[j].item()
                req_v = int(required_versions[idx].item())
                min_cost = float('inf')
                if cur_v_j < req_v:
                    for target_v in range(req_v, self.V):
                        if self.jump[j, cur_v_j, target_v] > 0:
                            min_cost = min(min_cost, self.ut[j, cur_v_j, target_v].item())
                upgrade_costs.append(min_cost if min_cost != float('inf') else 0.0)
            upgrade_costs = torch.tensor(upgrade_costs, device=self.device).unsqueeze(1)  # [E_i, 1]

            edge_features.append(torch.cat([required_versions, urgency, upgrade_costs], dim=1))

        if src:
            # Equivalent to (and clearer than) cat(src + dst).view(2, -1).
            edge_index = torch.stack([torch.cat(src), torch.cat(dst)], dim=0)  # [2, E]
            edge_attr = torch.cat(edge_features, dim=0)  # [E, 3]
        else:
            edge_index = torch.empty((2, 0), dtype=torch.long, device=self.device)
            edge_attr = torch.empty((0, 3), dtype=torch.float, device=self.device)

        # Single-graph batch vector.
        batch = torch.zeros(self.N, dtype=torch.long, device=self.device)

        return Data(x=x, edge_index=edge_index, edge_attr=edge_attr, batch=batch)

# 使用示例和测试
def test_advanced_gnn():
    """Smoke-test the full pipeline: graph builder -> GNN -> Q-network."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Dimensions of the synthetic problem instance.
    n_comp, n_ver, hidden = 5, 10, 128

    # Random dependency / jump / upgrade-time matrices.
    dep_mat = np.random.randint(-1, n_ver, (n_comp, n_comp, n_ver))
    jump_mat = np.random.randint(0, 2, (n_comp, n_ver, n_ver))
    up_time = np.random.randint(1, 10, (n_comp, n_ver, n_ver))

    # Graph builder over the synthetic matrices.
    builder = AdvancedVersionedDependencyGraph(dep_mat, jump_mat, up_time, device)

    # GNN; node width follows the builder's feature layout: 2V + 7.
    gnn = TemporalAwareGNN(
        node_dim=2 * n_ver + 7,
        edge_dim=3,
        hidden_dim=hidden,
        num_layers=3,
        num_heads=8,
    ).to(device)

    # Q-network scoring 5-dim action features against the graph embedding.
    q_net = CrossAttentionQNetwork(
        graph_hid_dim=hidden,
        agent_feat_dim=5,
        q_hid_dim=hidden,
        num_heads=8,
    ).to(device)

    # Forward pass through the whole stack.
    cur_versions = torch.randint(0, n_ver, (n_comp,), device=device)
    temporal_info = torch.tensor([0.5, 0.3], device=device)  # [remaining_time_norm, active_ratio]

    graph_data = builder.build_graph(cur_versions, active_upgrades={1, 3})

    graph_emb = gnn(graph_data, temporal_info)
    print(f"Graph embedding shape: {graph_emb.shape}")

    action_feats = torch.randn(10, 5, device=device)  # 10 actions x 5 features
    q_values = q_net(graph_emb, action_feats)
    print(f"Q values shape: {q_values.shape}")

    print("Advanced GNN test passed!")

# Run the end-to-end smoke test when executed as a script.
if __name__ == "__main__":
    test_advanced_gnn()