import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import numpy as np
from collections import deque
from advanced_graph_neural_network import TemporalAwareGNN, CrossAttentionQNetwork, AdvancedVersionedDependencyGraph

class PriorityReplayBuffer:
    """Prioritized experience replay buffer (proportional variant).

    Transitions are sampled with probability proportional to
    priority**alpha, and importance-sampling (IS) weights correct the
    induced bias.  beta is annealed toward 1.0 with every sample() call.
    """
    def __init__(self, capacity, alpha=0.6, beta=0.4, beta_increment=0.001):
        self.capacity = capacity
        self.alpha = alpha                    # priority exponent (0 = uniform sampling)
        self.beta = beta                      # IS-weight exponent, annealed toward 1.0
        self.beta_increment = beta_increment
        self.buffer = []
        self.pos = 0                          # next write slot (ring buffer)
        self.priorities = np.zeros((capacity,), dtype=np.float32)

    def add(self, graph_data, agent_feats, action_idx, reward, next_graph_data, next_agent_feats, done, td_error=1.0):
        """Store one transition.

        The initial priority is the larger of the running max priority and
        |td_error| + eps, so fresh experience is replayed at least once.
        (Fix: the original accepted td_error but ignored it entirely.)
        """
        max_prio = self.priorities.max() if self.buffer else 1.0

        if len(self.buffer) < self.capacity:
            self.buffer.append(None)

        self.buffer[self.pos] = (graph_data, agent_feats, action_idx, reward, next_graph_data, next_agent_feats, done)
        self.priorities[self.pos] = max(max_prio, abs(td_error) + 1e-6)

        self.pos = (self.pos + 1) % self.capacity

    def sample(self, batch_size):
        """Draw a prioritized batch.

        Returns:
            (samples, indices, weights): samples is a list of stored
            7-tuple transitions, indices are buffer positions (pass them to
            update_priorities), and weights are normalized float32 IS
            weights (max weight == 1.0).

        Fix: previously returned 9 zip-transposed columns, which did not
        match the 3-value unpacking at the call site in the agent's
        update(); callers iterate the samples and unpack each tuple.
        """
        if len(self.buffer) == self.capacity:
            prios = self.priorities
        else:
            prios = self.priorities[:self.pos]

        probs = prios ** self.alpha
        probs /= probs.sum()

        indices = np.random.choice(len(self.buffer), batch_size, p=probs)
        samples = [self.buffer[idx] for idx in indices]

        total = len(self.buffer)
        weights = (total * probs[indices]) ** (-self.beta)
        weights /= weights.max()  # normalize so the largest weight is exactly 1
        weights = np.array(weights, dtype=np.float32)

        # Anneal beta toward 1.0 (full bias correction late in training).
        self.beta = min(1.0, self.beta + self.beta_increment)

        return samples, indices, weights

    def update_priorities(self, indices, td_errors):
        """Refresh priorities after a learning step from the new TD errors."""
        for idx, td_error in zip(indices, td_errors):
            self.priorities[idx] = abs(td_error) + 1e-6

    def __len__(self):
        return len(self.buffer)

class NoisyLinear(nn.Module):
    """Linear layer with learnable factorized Gaussian noise (NoisyNet).

    In training mode the effective parameters are mu + sigma * epsilon,
    where epsilon is factorized noise resampled via reset_noise(); in eval
    mode the layer behaves as a plain linear map using only the mu
    parameters.
    """
    def __init__(self, in_features, out_features, sigma_init=0.5):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.sigma_init = sigma_init

        # Learnable mean/scale parameters for weights and biases.
        self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
        self.bias_mu = nn.Parameter(torch.empty(out_features))
        self.bias_sigma = nn.Parameter(torch.empty(out_features))

        # Noise buffers: part of module state, but never optimized.
        self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))
        self.register_buffer('bias_epsilon', torch.empty(out_features))

        self.reset_parameters()
        self.reset_noise()

    def reset_parameters(self):
        """Initialize mu uniformly in ±1/sqrt(fan_in); sigma to a constant."""
        bound = 1 / np.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-bound, bound)
        self.weight_sigma.data.fill_(self.sigma_init / np.sqrt(self.in_features))
        self.bias_mu.data.uniform_(-bound, bound)
        self.bias_sigma.data.fill_(self.sigma_init / np.sqrt(self.out_features))

    def reset_noise(self):
        """Resample factorized noise: eps_W = f(eps_out) ⊗ f(eps_in)."""
        eps_in = self._scale_noise(self.in_features)
        eps_out = self._scale_noise(self.out_features)
        self.weight_epsilon.copy_(eps_out.ger(eps_in))
        self.bias_epsilon.copy_(eps_out)

    def _scale_noise(self, size):
        """Return f(x) = sign(x) * sqrt(|x|) for x ~ N(0, 1)."""
        sample = torch.randn(size, device=self.weight_mu.device)
        return sample.sign().mul_(sample.abs().sqrt_())

    def forward(self, input):
        if not self.training:
            # Deterministic path: noise is ignored at evaluation time.
            return F.linear(input, self.weight_mu, self.bias_mu)
        noisy_weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
        noisy_bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        return F.linear(input, noisy_weight, noisy_bias)

class DuelingQNetwork(nn.Module):
    """Cross-attention Q network wrapper with (currently inactive) dueling heads.

    NOTE(review): the value/advantage streams are constructed but never used
    in forward(); the wrapped CrossAttentionQNetwork already produces Q
    values, so this class is effectively a thin wrapper pending a refactor
    that makes the attention module return features instead.
    """
    def __init__(self, graph_hid_dim, agent_feat_dim, q_hid_dim, num_heads=8, dropout=0.1):
        super().__init__()

        # Shared feature extractor (currently also the Q-value producer).
        self.cross_attention = CrossAttentionQNetwork(
            graph_hid_dim, agent_feat_dim, q_hid_dim, num_heads, dropout
        )

        # Dueling heads: state-value stream (scalar) ...
        self.value_stream = nn.Sequential(
            NoisyLinear(q_hid_dim, q_hid_dim // 2),
            nn.ReLU(),
            NoisyLinear(q_hid_dim // 2, 1)
        )

        # ... and per-action advantage stream.  Neither is wired up yet; they
        # are kept so the module's parameter/state_dict layout is stable.
        self.advantage_stream = nn.Sequential(
            NoisyLinear(q_hid_dim, q_hid_dim // 2),
            nn.ReLU(),
            NoisyLinear(q_hid_dim // 2, 1)
        )

        self.layer_norm = nn.LayerNorm(q_hid_dim)

    def forward(self, graph_emb, action_feats):
        """Delegate to the cross-attention module.

        graph_emb: [graph_hid_dim] or [B, graph_hid_dim]
        action_feats: [K, agent_feat_dim] or [B, agent_feat_dim]

        A 1-D attention output gains a trailing axis ([K] -> [K, 1]) before
        being returned, mirroring the original provisional behavior.
        """
        out = self.cross_attention(graph_emb, action_feats)
        if out.dim() == 1:
            out = out.unsqueeze(1)
        return out

class AdvancedCrossAttentionQNetwork(nn.Module):
    """Cross-attention Q network with a dueling (value/advantage) head.

    Graph and action features are projected into a shared q_hid_dim space.
    In single-graph inference mode the K candidate actions additionally
    interact through self-attention and condition the graph view through
    cross-attention before the dueling head scores each action.  In batched
    training mode (one action per sample) the projections are fused directly.

    Fix vs. original: removed three leftover debug print() calls from
    forward() that spammed stdout on every pass, and an unused local in the
    batch branch.
    """
    def __init__(self, graph_hid_dim, agent_feat_dim, q_hid_dim, num_heads=8, dropout=0.1):
        super().__init__()
        self.graph_hid_dim = graph_hid_dim
        self.agent_feat_dim = agent_feat_dim
        self.q_hid_dim = q_hid_dim
        self.num_heads = num_heads

        # Graph-to-action cross-attention (feature extraction only; Q values
        # come from the dueling heads below).
        self.cross_attention = nn.MultiheadAttention(
            embed_dim=q_hid_dim,
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True
        )

        # Projections into the shared q_hid_dim space.
        self.graph_proj = nn.Linear(graph_hid_dim, q_hid_dim)
        self.action_proj = nn.Linear(agent_feat_dim, q_hid_dim)

        # Action-to-action self-attention.
        self.self_attention = nn.MultiheadAttention(
            embed_dim=q_hid_dim,
            num_heads=num_heads,
            dropout=dropout,
            batch_first=True
        )

        # Fuses the (possibly attention-enhanced) graph and action features.
        self.feature_fusion = nn.Sequential(
            nn.Linear(q_hid_dim * 2, q_hid_dim),
            nn.LayerNorm(q_hid_dim),
            nn.GELU(),
            nn.Dropout(dropout)
        )

        # Dueling head: state-value stream (one scalar per row) ...
        self.value_stream = nn.Sequential(
            NoisyLinear(q_hid_dim, q_hid_dim // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            NoisyLinear(q_hid_dim // 2, 1)
        )

        # ... and advantage stream (relative preference per action).
        self.advantage_stream = nn.Sequential(
            NoisyLinear(q_hid_dim, q_hid_dim // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            NoisyLinear(q_hid_dim // 2, 1)
        )

        # Currently unused in forward(); kept so existing checkpoints'
        # state_dict layout remains unchanged.
        self.dropout = nn.Dropout(dropout)

    def forward(self, graph_emb, action_feats):
        """Score candidate actions against a graph embedding.

        Args:
            graph_emb: [graph_hid_dim] (inference: one graph vs. K actions)
                or [B, graph_hid_dim] (training: one action per sample).
            action_feats: [K, agent_feat_dim] or [B, agent_feat_dim].

        Returns:
            1-D tensor of Q values, shape [K] or [B].
        """
        if graph_emb.dim() == 1:
            # Inference: broadcast the single graph embedding over K actions.
            K = action_feats.size(0)
            graph_emb = graph_emb.unsqueeze(0).expand(K, -1)  # [K, graph_hid_dim]
            batch_mode = False
        else:
            # Training: batched one-to-one graph/action pairs.
            batch_mode = True

        # Project both modalities into the shared space.
        graph_feat = self.graph_proj(graph_emb)       # [K, q_hid_dim] or [B, q_hid_dim]
        action_feat = self.action_proj(action_feats)  # [K, q_hid_dim] or [B, q_hid_dim]

        if not batch_mode:
            # Treat the K candidates as a length-K sequence (batch of 1).
            action_seq = action_feat.unsqueeze(0)  # [1, K, q_hid_dim]
            graph_seq = graph_feat.unsqueeze(0)    # [1, K, q_hid_dim]

            # Self-attention: interactions among candidate actions.
            action_attn, _ = self.self_attention(action_seq, action_seq, action_seq)
            action_feat_enhanced = action_feat + action_attn.squeeze(0)

            # Cross-attention: graph features attend to the action set.
            cross_attn, _ = self.cross_attention(graph_seq, action_seq, action_seq)
            graph_feat_enhanced = graph_feat + cross_attn.squeeze(0)

            # Fuse enhanced graph and action features.
            combined_feat = torch.cat([graph_feat_enhanced, action_feat_enhanced], dim=1)
            fused_feat = self.feature_fusion(combined_feat)  # [K, q_hid_dim]
        else:
            # Training: one action per sample, so no attention -- fuse directly.
            combined_feat = torch.cat([graph_feat, action_feat], dim=1)
            fused_feat = self.feature_fusion(combined_feat)  # [B, q_hid_dim]

        # Dueling head.
        values = self.value_stream(fused_feat)          # [K, 1] or [B, 1]
        advantages = self.advantage_stream(fused_feat)  # [K, 1] or [B, 1]

        # Dueling aggregation: Q(s,a) = V(s) + (A(s,a) - mean_a A(s,a)).
        if not batch_mode:
            advantages_mean = advantages.mean(dim=0, keepdim=True)  # [1, 1]
            q_values = values + (advantages - advantages_mean)      # [K, 1]
        else:
            # With one action per sample, the mean over actions is
            # unavailable; the identifiability correction is skipped.
            q_values = values + advantages  # [B, 1]

        return q_values.squeeze(-1)  # [K] or [B]

class AdvancedGNNDQNAgent:
    """GNN-based DQN agent with optional Rainbow-style extensions.

    A TemporalAwareGNN encodes the dependency graph into a state embedding,
    which a cross-attention Q network scores against candidate action
    features.  Double DQN, dueling heads, NoisyNet exploration and
    prioritized replay can each be toggled via constructor flags.
    """
    def __init__(self,
                 dependency_matrix: np.ndarray,
                 jump_matrix: np.ndarray,
                 upgrade_time: np.ndarray,
                 graph_hid_dim: int = 128,
                 q_net_hid_dim: int = 256,
                 agent_feat_dim: int = 15,
                 lr: float = 1e-4,
                 gamma: float = 0.99,
                 eps_start: float = 1.0,
                 eps_end: float = 0.01,
                 eps_decay: float = 0.995,
                 target_update: int = 100,
                 device: str = 'cpu',
                 use_double_dqn: bool = True,
                 use_dueling: bool = True,
                 use_noisy: bool = True,
                 use_priority_buffer: bool = True):
        
        self.device = torch.device(device)
        self.gamma = gamma
        self.eps_start = eps_start
        self.eps_end = eps_end
        self.eps_decay = eps_decay
        self.current_eps = eps_start
        self.target_update = target_update
        self.use_double_dqn = use_double_dqn
        self.use_dueling = use_dueling
        self.use_noisy = use_noisy
        self.q_hid_dim = q_net_hid_dim

        # Graph builder: turns raw environment state into graph data.
        self.graph_builder = AdvancedVersionedDependencyGraph(
            dependency_matrix, jump_matrix, upgrade_time, self.device
        )
        
        # Node/edge feature widths follow AdvancedVersionedDependencyGraph's layout.
        num_components, _, max_versions = dependency_matrix.shape
        node_dim = 2 * max_versions + 7  # derived from the graph builder's node features
        edge_dim = 3
        
        # GNN state encoder.
        self.gnn = TemporalAwareGNN(
            node_dim=node_dim,
            edge_dim=edge_dim,
            hidden_dim=graph_hid_dim,
            num_layers=3,
            num_heads=8,
            dropout=0.1
        ).to(self.device)
        
        # Online and target Q networks.
        if use_dueling:
            self.q_net = AdvancedCrossAttentionQNetwork(
                graph_hid_dim=graph_hid_dim,
                agent_feat_dim=agent_feat_dim,
                q_hid_dim=q_net_hid_dim,
                num_heads=8,
                dropout=0.1
            ).to(self.device)
            
            self.target_q_net = AdvancedCrossAttentionQNetwork(
                graph_hid_dim=graph_hid_dim,
                agent_feat_dim=agent_feat_dim,
                q_hid_dim=q_net_hid_dim,
                num_heads=8,
                dropout=0.1
            ).to(self.device)
        else:
            # Fall back to the original (non-dueling) CrossAttentionQNetwork.
            from advanced_graph_neural_network import CrossAttentionQNetwork
            self.q_net = CrossAttentionQNetwork(
                graph_hid_dim, agent_feat_dim, q_net_hid_dim, 8, 0.1
            ).to(self.device)
            
            self.target_q_net = CrossAttentionQNetwork(
                graph_hid_dim, agent_feat_dim, q_net_hid_dim, 8, 0.1
            ).to(self.device)
        
        # Start the target network in sync with the online network.
        self.target_q_net.load_state_dict(self.q_net.state_dict())
        
        # One optimizer over both the encoder and the Q network.
        all_params = list(self.gnn.parameters()) + list(self.q_net.parameters())
        self.optimizer = torch.optim.AdamW(all_params, lr=lr, weight_decay=1e-5)
        
        # Cosine learning-rate schedule (stepped once per update()).
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, T_max=1000, eta_min=lr/10
        )
        
        # Experience replay buffer: prioritized or plain FIFO.
        if use_priority_buffer:
            self.buffer = PriorityReplayBuffer(50000, alpha=0.6, beta=0.4)
            self.use_priority_buffer = True
        else:
            from collections import deque
            self.buffer = deque(maxlen=50000)
            self.use_priority_buffer = False
        
        self.batch_size = 128
        self.step_count = 0
        
        # Rolling training diagnostics.
        self.training_metrics = {
            'loss_history': [],
            'q_value_history': [],
            'epsilon_history': [],
            'td_error_history': []
        }
        
    def select_action(self, state, valid_action_indices, action_features, 
                      remaining_time=None, active_upgrades=None, is_training=True):
        """Pick an action from valid_action_indices.

        Epsilon-greedy exploration is used only when NoisyNet layers are
        disabled; exploitation scores only the valid actions.
        """
        # Temporal context fed to the GNN alongside the graph data.
        temporal_info = torch.tensor([remaining_time or 0.0, 
                                    len(active_upgrades or []) / 10.0], 
                                   device=self.device)
        
        graph_data = self.graph_builder.build_graph(
            state, remaining_time, active_upgrades
        )
        
        # Epsilon-greedy branch (skipped when NoisyNet provides exploration).
        if is_training and not self.use_noisy and random.random() < self.current_eps:
            return random.choice(valid_action_indices)
        
        with torch.no_grad():
            # Encode the graph state.
            graph_emb = self.gnn(graph_data, temporal_info)
            
            # Restrict scoring to the currently valid actions.
            valid_action_feats = action_features[valid_action_indices]
            
            # Q values for each valid action.
            q_values = self.q_net(graph_emb, valid_action_feats)
            
            # Greedy choice among the valid subset.
            best_action_idx = q_values.argmax().item()
            selected_action = valid_action_indices[best_action_idx]
        
        return selected_action
    
    def store_experience(self, state, valid_indices, action_features, action_idx, 
                        reward, next_state, next_valid_indices, next_action_features, 
                        done, remaining_time=None, active_upgrades=None,
                        next_remaining_time=None, next_active_upgrades=None):
        """Store one transition in the replay buffer.

        NOTE(review): the prioritized path stores the raw `state` objects in
        the buffer's graph_data slots, and update() later feeds them to the
        GNN without calling build_graph -- confirm the stored states are
        already graph data.
        """
        experience = {
            'state': state,
            'valid_indices': valid_indices,
            'action_features': action_features,
            'action_idx': action_idx,
            'reward': reward,
            'next_state': next_state,
            'next_valid_indices': next_valid_indices,
            'next_action_features': next_action_features,
            'done': done,
            'remaining_time': remaining_time,
            'active_upgrades': active_upgrades,
            'next_remaining_time': next_remaining_time,
            'next_active_upgrades': next_active_upgrades
        }
        
        if self.use_priority_buffer:
            # Prioritized replay seeds each transition with its TD error.
            td_error = self._compute_td_error(experience)
            self.buffer.add(
                state, action_features, action_idx, reward,
                next_state, next_action_features, done, td_error
            )
        else:
            self.buffer.append(experience)
    
    def _compute_td_error(self, experience):
        """Estimate |TD error| for one transition (used as replay priority)."""
        try:
            with torch.no_grad():
                # Graph for the current state.
                current_graph = self.graph_builder.build_graph(
                    experience['state'], 
                    experience['remaining_time'], 
                    experience['active_upgrades']
                )
                
                # Graph for the next state.
                next_graph = self.graph_builder.build_graph(
                    experience['next_state'],
                    experience['next_remaining_time'],
                    experience['next_active_upgrades']
                )
                
                # Current Q value of the taken action.
                temporal_info = torch.tensor([experience['remaining_time'] or 0.0, 
                                            len(experience['active_upgrades'] or []) / 10.0], 
                                           device=self.device)
                graph_emb = self.gnn(current_graph, temporal_info)
                
                action_idx_tensor = torch.tensor([experience['action_idx']], device=self.device)
                action_feat = experience['action_features'][action_idx_tensor]
                current_q = self.q_net(graph_emb, action_feat).item()
                
                # Bootstrapped target from the target network.
                next_temporal_info = torch.tensor([experience['next_remaining_time'] or 0.0,
                                                 len(experience['next_active_upgrades'] or []) / 10.0],
                                                device=self.device)
                next_graph_emb = self.gnn(next_graph, next_temporal_info)
                next_q_values = self.target_q_net(next_graph_emb, experience['next_action_features'])
                target_q = experience['reward'] + self.gamma * next_q_values.max().item() * (1 - experience['done'])
                
                return abs(current_q - target_q)
        # NOTE(review): bare except silently maps any failure to priority 1.0;
        # consider narrowing to the exceptions actually expected here.
        except:
            return 1.0  # default TD error on failure
    
    def update(self):
        """Run one optimization step; returns the loss, or None if the
        buffer does not yet hold a full batch."""
        if self.use_priority_buffer:
            if len(self.buffer) < self.batch_size:
                return
            
            # Prioritized sampling.
            # NOTE(review): this unpacking requires sample() to return exactly
            # (experiences, indices, weights) -- verify against
            # PriorityReplayBuffer.sample's return signature.
            experiences, indices, weights = self.buffer.sample(self.batch_size)
            weights = torch.tensor(weights, device=self.device)
        else:
            if len(self.buffer) < self.batch_size:
                return
            
            # Uniform sampling (unit IS weights).
            experiences = random.sample(self.buffer, self.batch_size)
            indices = None
            weights = torch.ones(self.batch_size, device=self.device)
        
        # Unpack the batch into parallel lists.
        states, action_features, actions, rewards, next_states, next_action_features, dones = [], [], [], [], [], [], []
        remaining_times, active_upgrades_list = [], []
        next_remaining_times, next_active_upgrades_list = [], []
        
        for exp in experiences:
            if self.use_priority_buffer:
                # The priority buffer stores flat 7-tuples, not dicts.
                graph_data, agent_feats, action_idx, reward, next_graph_data, next_agent_feats, done = exp
                states.append(graph_data)
                next_states.append(next_graph_data)
                action_features.append(agent_feats[action_idx])
                next_action_features.append(next_agent_feats)
                actions.append(action_idx)
                rewards.append(reward)
                dones.append(done)
                remaining_times.append(0.5)  # default value (not stored in tuples)
                active_upgrades_list.append([])
                next_remaining_times.append(0.5)
                next_active_upgrades_list.append([])
            else:
                states.append(exp['state'])
                action_features.append(exp['action_features'][exp['action_idx']])
                actions.append(exp['action_idx'])
                rewards.append(exp['reward'])
                next_states.append(exp['next_state'])
                next_action_features.append(exp['next_action_features'])
                dones.append(exp['done'])
                remaining_times.append(exp['remaining_time'] or 0.5)
                active_upgrades_list.append(exp['active_upgrades'] or [])
                next_remaining_times.append(exp['next_remaining_time'] or 0.5)
                next_active_upgrades_list.append(exp['next_active_upgrades'] or [])
        
        # To tensors.
        rewards = torch.tensor(rewards, dtype=torch.float, device=self.device)
        dones = torch.tensor(dones, dtype=torch.float, device=self.device)
        
        # Current Q values, computed one transition at a time.
        current_q_values = []
        for i, (state, remaining_time, active_upgrades) in enumerate(zip(states, remaining_times, active_upgrades_list)):
            temporal_info = torch.tensor([remaining_time, len(active_upgrades) / 10.0], device=self.device)
            if not self.use_priority_buffer:
                graph_data = self.graph_builder.build_graph(state, remaining_time, active_upgrades)
            else:
                # NOTE(review): the stored state is used as graph data directly
                # here -- confirm the priority path stores built graphs.
                graph_data = state
            
            graph_emb = self.gnn(graph_data, temporal_info)
            action_feat = action_features[i].unsqueeze(0) if hasattr(action_features[i], 'unsqueeze') else torch.tensor([action_features[i]], device=self.device)
            q_val = self.q_net(graph_emb, action_feat)
            current_q_values.append(q_val.squeeze())
        
        current_q_values = torch.stack(current_q_values)
        
        # Bootstrapped target Q values (no gradient).
        next_q_values = []
        with torch.no_grad():
            for i, (next_state, next_remaining_time, next_active_upgrades) in enumerate(zip(next_states, next_remaining_times, next_active_upgrades_list)):
                next_temporal_info = torch.tensor([next_remaining_time, len(next_active_upgrades) / 10.0], device=self.device)
                if not self.use_priority_buffer:
                    next_graph_data = self.graph_builder.build_graph(next_state, next_remaining_time, next_active_upgrades)
                else:
                    next_graph_data = next_state
                
                next_graph_emb = self.gnn(next_graph_data, next_temporal_info)
                
                if self.use_double_dqn:
                    # Double DQN: online net selects, target net evaluates.
                    next_q_main = self.q_net(next_graph_emb, next_action_features[i])
                    best_actions = next_q_main.argmax(dim=0, keepdim=True)
                    next_q_target = self.target_q_net(next_graph_emb, next_action_features[i])
                    next_q_val = next_q_target.gather(0, best_actions)
                else:
                    # Vanilla DQN: max over the target net's Q values.
                    next_q_val = self.target_q_net(next_graph_emb, next_action_features[i]).max()
                
                next_q_values.append(next_q_val)
        
        next_q_values = torch.stack(next_q_values).squeeze()
        target_q_values = rewards + self.gamma * next_q_values * (1 - dones)
        
        # Importance-weighted TD loss.
        td_errors = current_q_values - target_q_values
        loss = (weights * td_errors.pow(2)).mean()
        
        # Backpropagation.
        self.optimizer.zero_grad()
        loss.backward()
        
        # Gradient clipping over both networks.
        torch.nn.utils.clip_grad_norm_(
            list(self.gnn.parameters()) + list(self.q_net.parameters()), 
            max_norm=10.0
        )
        
        self.optimizer.step()
        self.scheduler.step()
        
        # Refresh replay priorities from the new TD errors.
        if self.use_priority_buffer and indices is not None:
            self.buffer.update_priorities(indices, td_errors.detach().cpu().numpy())
        
        # Periodic hard update of the target network.
        self.step_count += 1
        if self.step_count % self.target_update == 0:
            self.target_q_net.load_state_dict(self.q_net.state_dict())
        
        # Anneal epsilon (relevant only without NoisyNet).
        if not self.use_noisy:
            self.current_eps = max(self.eps_end, self.current_eps * self.eps_decay)
        
        # Resample NoisyNet noise for the next step.
        if self.use_noisy:
            self._reset_noise()
        
        # Record diagnostics.
        self.training_metrics['loss_history'].append(loss.item())
        self.training_metrics['q_value_history'].append(current_q_values.mean().item())
        self.training_metrics['epsilon_history'].append(self.current_eps)
        self.training_metrics['td_error_history'].append(td_errors.abs().mean().item())
        
        return loss.item()
    
    def _reset_noise(self):
        """Resample the noise in every NoisyLinear layer of both networks."""
        for module in self.q_net.modules():
            if isinstance(module, NoisyLinear):
                module.reset_noise()
        
        for module in self.target_q_net.modules():
            if isinstance(module, NoisyLinear):
                module.reset_noise()
    
    def save(self, path):
        """Serialize networks, optimizer/scheduler state and training
        metadata to `path`."""
        checkpoint = {
            'gnn_state_dict': self.gnn.state_dict(),
            'q_net_state_dict': self.q_net.state_dict(),
            'target_q_net_state_dict': self.target_q_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'step_count': self.step_count,
            'current_eps': self.current_eps,
            'training_metrics': self.training_metrics,
            'config': {
                'use_double_dqn': self.use_double_dqn,
                'use_dueling': self.use_dueling,
                'use_noisy': self.use_noisy,
                'use_priority_buffer': self.use_priority_buffer
            }
        }
        torch.save(checkpoint, path)
        print(f"Model saved to {path}")
    
    def load(self, path, map_location=None):
        """Restore a checkpoint written by save(); optional keys fall back
        to sensible defaults."""
        checkpoint = torch.load(path, map_location=map_location)
        
        self.gnn.load_state_dict(checkpoint['gnn_state_dict'])
        self.q_net.load_state_dict(checkpoint['q_net_state_dict'])
        self.target_q_net.load_state_dict(checkpoint['target_q_net_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        
        if 'scheduler_state_dict' in checkpoint:
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        
        self.step_count = checkpoint.get('step_count', 0)
        self.current_eps = checkpoint.get('current_eps', self.eps_start)
        self.training_metrics = checkpoint.get('training_metrics', {
            'loss_history': [], 'q_value_history': [], 
            'epsilon_history': [], 'td_error_history': []
        })
        
        print(f"Model loaded from {path}")
    
    def get_training_info(self):
        """Return a small status snapshot for logging/monitoring."""
        return {
            'step_count': self.step_count,
            'current_epsilon': self.current_eps,
            'buffer_size': len(self.buffer),
            'recent_loss': self.training_metrics['loss_history'][-10:] if self.training_metrics['loss_history'] else [],
            'recent_q_values': self.training_metrics['q_value_history'][-10:] if self.training_metrics['q_value_history'] else [],
        }