import random
import gymnasium as gym
import numpy as np
import collections
from tqdm import tqdm
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import os
import json
from datetime import datetime
import sys

# Custom JSON encoder that converts NumPy values to native Python types.
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder handling NumPy arrays and scalars.

    Uses the abstract base classes ``np.integer`` / ``np.floating`` so the
    encoder works on both NumPy 1.x and 2.x (the aliases ``np.float_`` and
    the long tuple of sized int types were removed/deprecated in NumPy 2.0).
    """
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.bool_):
            return bool(obj)
        return super().default(obj)

# Create the output directory tree for models and results.
def create_save_dirs():
    """Create (if missing) and return the result directories.

    Returns:
        tuple[str, str, str, str, str]: base, model, plot, curriculum and
        multi-scale directory paths, in that order.
    """
    base_dir = "dqn_multi_scale_results"
    sub_dirs = [os.path.join(base_dir, name)
                for name in ("models", "plots", "curriculum", "multi_scale")]
    for directory in [base_dir] + sub_dirs:
        # exist_ok avoids the race between the existence check and creation.
        os.makedirs(directory, exist_ok=True)
    return (base_dir, *sub_dirs)

# Create the output directories once at import time; these module-level
# constants are used by the saving helpers throughout this file.
BASE_DIR, MODEL_DIR, PLOT_DIR, CURRICULUM_DIR, MULTI_SCALE_DIR = create_save_dirs()

# ====================== Multi-time-scale core components ======================
class MultiScaleQNet(torch.nn.Module):
    """Two-branch Q-network with a fast (STM) and a slow (LTM) pathway.

    Both branches see the raw state; a learned attention head produces a
    2-way softmax that weighs each branch's features before the final
    action-value projection.

    Note: ``hidden_dim`` is accepted for interface compatibility but is not
    used by this architecture — branch widths come from ``stm_dim`` and
    ``ltm_dim``.
    """

    def __init__(self, state_dim, hidden_dim, action_dim, stm_dim=64, ltm_dim=64):
        super(MultiScaleQNet, self).__init__()
        self.action_dim = action_dim
        # Fast branch (short-term memory): immediate decisions.
        self.stm_fc1 = torch.nn.Linear(state_dim, stm_dim)
        self.stm_fc2 = torch.nn.Linear(stm_dim, stm_dim)
        # Slow branch (long-term memory): long-horizon strategy.
        self.ltm_fc1 = torch.nn.Linear(state_dim, ltm_dim)
        self.ltm_fc2 = torch.nn.Linear(ltm_dim, ltm_dim)
        # Attention head: 2 logits -> softmax over (STM, LTM).
        self.attention_fc = torch.nn.Linear(stm_dim + ltm_dim, 2)
        # Final projection to per-action Q-values.
        self.output_fc = torch.nn.Linear(stm_dim + ltm_dim, action_dim)

    def forward(self, x):
        """Return ``(q_values, attention_weights)`` for a batch of states."""
        fast = F.relu(self.stm_fc2(F.relu(self.stm_fc1(x))))
        slow = F.relu(self.ltm_fc2(F.relu(self.ltm_fc1(x))))
        joint = torch.cat([fast, slow], dim=1)
        weights = F.softmax(self.attention_fc(joint), dim=1)
        # Scale each branch by its attention weight, then project.
        scaled = torch.cat(
            [fast * weights[:, 0].unsqueeze(1), slow * weights[:, 1].unsqueeze(1)],
            dim=1,
        )
        return self.output_fc(scaled), weights

class MultiScaleReplayBuffer:
    """Replay buffer split across fast (STM), slow (LTM) and full memories.

    Every transition goes into the full buffer; it is additionally routed
    to the STM or LTM buffer depending on which attention weight dominated
    when the transition was collected.
    """

    def __init__(self, capacity, stm_capacity=1000, ltm_capacity=5000):
        self.stm_buffer = collections.deque(maxlen=stm_capacity)   # short-term memory
        self.ltm_buffer = collections.deque(maxlen=ltm_capacity)   # long-term memory
        self.full_buffer = collections.deque(maxlen=capacity)      # everything

    def _select(self, scale):
        # Map a scale name onto its backing deque ('full' is the default).
        if scale == 'stm':
            return self.stm_buffer
        if scale == 'ltm':
            return self.ltm_buffer
        return self.full_buffer

    def add(self, state, action, reward, next_state, done, attention_weights):
        """Store one transition, routing it by its dominant attention weight."""
        record = (state, action, reward, next_state, done, attention_weights)
        self.full_buffer.append(record)
        # attention_weights[0] is the fast-scale weight, [1] the slow-scale one.
        routed = self.stm_buffer if attention_weights[0] > attention_weights[1] else self.ltm_buffer
        routed.append(record)

    def sample(self, batch_size, scale='full'):
        """Sample a batch from the given scale; None if it holds too few items."""
        pool = self._select(scale)
        if len(pool) < batch_size:
            return None
        picks = random.sample(pool, batch_size)
        states, actions, rewards, next_states, dones, attns = zip(*picks)
        return (np.array(states), actions, rewards, np.array(next_states),
                dones, np.array(attns))

    def size(self, scale='full'):
        """Number of stored transitions at the given scale."""
        return len(self._select(scale))

class MultiScaleDQN:
    """Multi-time-scale DQN agent.

    Maintains a single MultiScaleQNet (plus a target copy) and supports
    updates tagged with a time scale ('stm' / 'ltm' / 'full'); the scale
    temporarily modulates the learning rate (fast scale learns faster,
    slow scale learns slower).
    """
    def __init__(self, state_dim, hidden_dim, action_dim, learning_rate, gamma,
                 epsilon, target_update, device, stm_update_freq=1, ltm_update_freq=5):
        self.action_dim = action_dim
        self.device = device
        
        # Online and target networks.
        self.q_net = MultiScaleQNet(state_dim, hidden_dim, action_dim).to(device)
        self.target_q_net = MultiScaleQNet(state_dim, hidden_dim, action_dim).to(device)
        self.base_lr = learning_rate
        self.optimizer = torch.optim.Adam(self.q_net.parameters(), lr=learning_rate)
        self.gamma = gamma              # discount factor
        self.epsilon = epsilon          # epsilon-greedy exploration rate
        self.target_update = target_update  # target sync period, in update() calls
        
        # Time-scale parameters (stored for bookkeeping; gating happens in
        # the caller's training loop).
        self.stm_update_freq = stm_update_freq  # fast-scale update frequency
        self.ltm_update_freq = ltm_update_freq  # slow-scale update frequency
        
        self.step_count = 0
        self.stm_update_count = 0
        self.ltm_update_count = 0
        
        # Start the target network as an exact copy of the online network.
        self.target_q_net.load_state_dict(self.q_net.state_dict())
        
    def take_action(self, state):
        """Epsilon-greedy action selection for a single state."""
        if np.random.random() < self.epsilon:
            return np.random.randint(self.action_dim)
        # Inference only: no_grad avoids building an autograd graph here.
        with torch.no_grad():
            state_tensor = torch.tensor(np.array([state]), dtype=torch.float).to(self.device)
            q_values, attention_weights = self.q_net(state_tensor)
        return q_values.argmax().item()
    
    def update(self, transition_dict, scale='full'):
        """Run one gradient step on a batch of transitions.

        Args:
            transition_dict: dict with 'states', 'actions', 'rewards',
                'next_states', 'dones' (array-likes of matching length).
            scale: 'stm' (2x base lr), 'ltm' (0.5x base lr) or 'full'.

        Returns:
            (loss_value, mean_attention): scalar MSE loss and the batch-mean
            (stm, ltm) attention weight pair from the online network.
        """
        states = torch.tensor(transition_dict['states'], dtype=torch.float).to(self.device)
        actions = torch.tensor(transition_dict['actions']).view(-1, 1).to(self.device)
        rewards = torch.tensor(transition_dict['rewards'], dtype=torch.float).view(-1, 1).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'], dtype=torch.float).to(self.device)
        dones = torch.tensor(transition_dict['dones'], dtype=torch.float).view(-1, 1).to(self.device)
        
        # Q(s, a) for the actions actually taken, plus attention weights.
        q_values, attention_weights = self.q_net(states)
        current_q_values = q_values.gather(1, actions)
        
        # Bootstrap target from the frozen target network.
        with torch.no_grad():
            target_q_values, _ = self.target_q_net(next_states)
            max_next_q_values = target_q_values.max(1)[0].view(-1, 1)
        
        # TD target; (1 - dones) zeroes the bootstrap on terminal states.
        q_targets = rewards + self.gamma * max_next_q_values * (1 - dones)
        
        loss = F.mse_loss(current_q_values, q_targets)
        
        # Scale-dependent learning rate.
        if scale == 'stm':
            current_lr = self.base_lr * 2  # fast scale: higher learning rate
        elif scale == 'ltm':
            current_lr = self.base_lr * 0.5  # slow scale: lower learning rate
        else:
            current_lr = self.base_lr  # default learning rate

        # Temporarily apply the scaled learning rate ...
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = current_lr
        
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        
        # ... then restore the base rate so other scales are unaffected.
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = self.base_lr
        
        # Bookkeeping.
        self.step_count += 1
        if scale == 'stm':
            self.stm_update_count += 1
        elif scale == 'ltm':
            self.ltm_update_count += 1
        
        # Periodically sync the target network.
        if self.step_count % self.target_update == 0:
            self.target_q_net.load_state_dict(self.q_net.state_dict())
        
        return loss.item(), attention_weights.mean(dim=0).detach().cpu().numpy()
    
    def save_model(self, filepath):
        """Save online/target network and optimizer state to ``filepath``."""
        torch.save({
            'q_net_state_dict': self.q_net.state_dict(),
            'target_q_net_state_dict': self.target_q_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict()
        }, filepath)
    
    def load_model(self, filepath):
        """Restore a checkpoint written by :meth:`save_model`.

        ``map_location`` lets a checkpoint saved on GPU load on a CPU-only
        machine (and vice versa).
        """
        checkpoint = torch.load(filepath, map_location=self.device)
        self.q_net.load_state_dict(checkpoint['q_net_state_dict'])
        self.target_q_net.load_state_dict(checkpoint['target_q_net_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

# ====================== Environment and helper classes ======================
class CustomCartPole(gym.Env):
    """CartPole-v1 wrapper whose gravity and pole length can be changed on
    the fly (used by the curriculum-learning difficulty scheduler)."""
    def __init__(self, gravity=9.8, pole_length=0.5):
        super().__init__()
        self.base_env = gym.make('CartPole-v1')
        self.gravity = gravity
        self.pole_length = pole_length
        self.observation_space = self.base_env.observation_space
        self.action_space = self.base_env.action_space
        
    def step(self, action):
        # Push the current physics parameters into the wrapped env before
        # every step so difficulty changes take effect immediately.
        self.base_env.unwrapped.gravity = self.gravity
        self.base_env.unwrapped.length = self.pole_length
        return self.base_env.step(action)
    
    def reset(self, **kwargs):
        return self.base_env.reset(**kwargs)
    
    def render(self, mode='human'):
        # Gymnasium selects the render mode at construction time via
        # gym.make(..., render_mode=...); env.render() takes no argument,
        # so passing ``mode`` through raised a TypeError. The parameter is
        # kept only for backward compatibility and is ignored.
        return self.base_env.render()
    
    def close(self):
        return self.base_env.close()

class DifficultyScheduler:
    """Dynamic difficulty scheduler — the core of the curriculum.

    Gravity oscillates sinusoidally around its baseline over scheduler
    steps, while the pole length grows geometrically whenever the agent's
    performance exceeds a threshold.
    """
    def __init__(self, 
                 initial_gravity=9.8, 
                 initial_pole_length=0.5,
                 gravity_factor=0.1,
                 pole_length_factor=0.05,
                 performance_threshold=195.0):
        # Current physics parameters.
        self.gravity = initial_gravity
        self.pole_length = initial_pole_length
        # Baselines the modulation is computed from.
        self.initial_gravity = initial_gravity
        self.initial_pole_length = initial_pole_length
        # Modulation coefficients.
        self.gravity_factor = gravity_factor
        self.pole_length_factor = pole_length_factor
        self.performance_threshold = performance_threshold
        # Bookkeeping.
        self.t_step = 0
        self.difficulty_history = []

    def update_difficulty(self, performance):
        """Advance one scheduler step; return ``(gravity, pole_length)``."""
        self.t_step += 1
        # Gravity: periodic sinusoidal perturbation around the baseline.
        phase = np.sin(self.t_step / 100)
        self.gravity = self.initial_gravity * (1 + self.gravity_factor * phase)
        # Pole length: grow geometrically once performance clears the bar.
        if performance > self.performance_threshold:
            self.pole_length *= (1 + self.pole_length_factor)
        # Keep a record for later saving/plotting.
        self.difficulty_history.append({
            'step': self.t_step,
            'gravity': self.gravity,
            'pole_length': self.pole_length,
            'performance': performance,
        })
        return self.gravity, self.pole_length

    def save_difficulty_progression(self, filename="difficulty_progression.json"):
        """Dump the difficulty history as JSON; return the file path."""
        filepath = os.path.join(CURRICULUM_DIR, filename)
        with open(filepath, 'w') as f:
            json.dump(self.difficulty_history, f, indent=4)
        return filepath

    def plot_difficulty_progression(self, filename="difficulty_progression.png"):
        """Render the difficulty curves to a PNG; return the file path."""
        history = self.difficulty_history
        steps = [h['step'] for h in history]
        gravities = [h['gravity'] for h in history]
        pole_lengths = [h['pole_length'] for h in history]
        performances = [h['performance'] for h in history]

        plt.figure(figsize=(12, 8))

        # Top panel: gravity over time.
        plt.subplot(2, 1, 1)
        plt.plot(steps, gravities, label='Gravity', color='blue')
        plt.xlabel('Training Steps')
        plt.ylabel('Gravity (m/s²)')
        plt.title('Gravity Variation Over Training')
        plt.grid(True)

        # Bottom panel: pole length against achieved performance.
        plt.subplot(2, 1, 2)
        plt.plot(steps, pole_lengths, label='Pole Length', color='green')
        plt.plot(steps, performances, label='Performance', color='red', linestyle='--')
        plt.xlabel('Training Steps')
        plt.ylabel('Value')
        plt.title('Pole Length and Performance Progression')
        plt.legend()
        plt.grid(True)

        plt.tight_layout()

        filepath = os.path.join(CURRICULUM_DIR, filename)
        plt.savefig(filepath, dpi=300, bbox_inches='tight')
        plt.close()
        return filepath

# ====================== Utility functions ======================
def save_training_plots(return_list, mv_return, episode, prefix=""):
    """Plot raw and moving-average returns side by side; save a PNG.

    Args:
        return_list: per-episode returns collected so far.
        mv_return: moving-average returns (window of 9).
        episode: episode index embedded in the output filename.
        prefix: optional filename prefix (e.g. "iter_3_").

    Returns:
        Path of the written image file.
    """
    plt.figure(figsize=(12, 6))

    # Left panel: raw per-episode returns.
    plt.subplot(1, 2, 1)
    plt.plot(list(range(len(return_list))), return_list)
    plt.xlabel('Episodes')
    plt.ylabel('Returns')
    plt.title('DQN on CartPole-v1')

    # Right panel: smoothed returns.
    plt.subplot(1, 2, 2)
    plt.plot(list(range(len(mv_return))), mv_return)
    plt.xlabel('Episodes')
    plt.ylabel('Moving Average Returns')
    plt.title('Moving Average (window=9)')

    plt.tight_layout()

    # Write the figure under the shared plot directory.
    filepath = os.path.join(PLOT_DIR, f"{prefix}training_progress_episode_{episode}.png")
    plt.savefig(filepath, dpi=300, bbox_inches='tight')
    plt.close()

    return filepath

def save_training_info(hyperparams, return_list, mv_return):
    """Write a JSON summary of the run (hyperparameters + final metrics).

    NumPy scalar results are cast to built-in ``float`` so the summary is
    JSON-serializable regardless of the element types in the return lists
    (e.g. ``np.float32`` is not a ``float`` subclass and would otherwise
    fail to serialize).

    Returns:
        Path of the written JSON file.
    """
    info = {
        'timestamp': datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
        'hyperparameters': hyperparams,
        'final_results': {
            'last_10_episodes_mean_return': float(np.mean(return_list[-10:])),
            'last_10_episodes_mean_mv_return': float(np.mean(mv_return[-10:])),
            'max_return': float(np.max(return_list)),
            'min_return': float(np.min(return_list)),
            'total_episodes': len(return_list)
        }
    }
    
    info_file = os.path.join(BASE_DIR, "training_info.json")
    with open(info_file, 'w') as f:
        json.dump(info, f, indent=4)
    
    return info_file

def save_multi_scale_analysis(attention_history, filename="multi_scale_analysis.png"):
    """Plot raw and smoothed STM/LTM attention weights; save a PNG.

    Args:
        attention_history: sequence of (stm_weight, ltm_weight) pairs.
        filename: output image name inside MULTI_SCALE_DIR.

    Returns:
        Path of the written image file.
    """
    steps = list(range(len(attention_history)))
    stm_attention = [attn[0] for attn in attention_history]
    ltm_attention = [attn[1] for attn in attention_history]
    
    plt.figure(figsize=(12, 6))
    
    # Left panel: raw attention weights.
    plt.subplot(1, 2, 1)
    plt.plot(steps, stm_attention, label='STM Attention', color='blue')
    plt.plot(steps, ltm_attention, label='LTM Attention', color='red')
    plt.xlabel('Training Steps')
    plt.ylabel('Attention Weights')
    plt.title('Multi-Scale Attention Weights')
    plt.legend()
    plt.grid(True)
    
    # Right panel: smoothed attention weights.
    plt.subplot(1, 2, 2)
    window = 50
    # Only smooth when there is more data than the window: convolving a
    # shorter sequence in 'valid' mode yields misleading constant values
    # (the final-plot code elsewhere in this file guards the same way).
    if len(attention_history) > window:
        stm_smooth = np.convolve(stm_attention, np.ones(window)/window, mode='valid')
        ltm_smooth = np.convolve(ltm_attention, np.ones(window)/window, mode='valid')
        plt.plot(range(len(stm_smooth)), stm_smooth, label='STM (smoothed)', color='blue')
        plt.plot(range(len(ltm_smooth)), ltm_smooth, label='LTM (smoothed)', color='red')
        plt.legend()
    else:
        plt.text(0.5, 0.5, 'Not enough data to smooth',
                 ha='center', va='center', fontsize=12)
    plt.xlabel('Training Steps')
    plt.ylabel('Smoothed Attention')
    plt.title('Smoothed Attention Weights')
    plt.grid(True)
    
    plt.tight_layout()
    
    filepath = os.path.join(MULTI_SCALE_DIR, filename)
    plt.savefig(filepath, dpi=300, bbox_inches='tight')
    plt.close()
    
    return filepath

# ====================== Main training program ======================
if __name__ == "__main__":
    # Hyperparameter settings
    lr = 2e-3
    num_episodes = 500
    hidden_dim = 128  # NOTE(review): MultiScaleQNet ignores this value (uses stm_dim/ltm_dim)
    gamma = 0.98
    epsilon = 0.01  # fixed epsilon-greedy rate (no decay schedule in this script)
    target_update = 10  # target-network sync period, counted in agent.update() calls
    buffer_size = 10000
    minimal_size = 500  # warm-up transitions before training begins
    batch_size = 64
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    
    # Multi-time-scale parameters
    stm_update_freq = 1    # fast-scale update frequency
    ltm_update_freq = 5    # slow-scale update frequency

    # Record hyperparameters for the run summary saved at the end
    hyperparams = {
        'learning_rate': lr,
        'num_episodes': num_episodes,
        'hidden_dim': hidden_dim,
        'gamma': gamma,
        'epsilon': epsilon,
        'target_update': target_update,
        'buffer_size': buffer_size,
        'minimal_size': minimal_size,
        'batch_size': batch_size,
        'device': str(device),
        'curriculum_learning': True,
        'multi_scale_learning': True,
        'stm_update_freq': stm_update_freq,
        'ltm_update_freq': ltm_update_freq
    }

    # Create the environment and the curriculum difficulty scheduler
    env = CustomCartPole()
    difficulty_scheduler = DifficultyScheduler(
        initial_gravity=9.8,
        initial_pole_length=0.5,
        gravity_factor=0.1,
        pole_length_factor=0.03,
        performance_threshold=195.0
    )

    # Seed all RNGs for reproducibility
    random.seed(0)
    np.random.seed(0)
    env.reset(seed=0)
    torch.manual_seed(0)

    # Multi-time-scale replay buffer
    replay_buffer = MultiScaleReplayBuffer(buffer_size)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    
    # Multi-time-scale DQN agent
    agent = MultiScaleDQN(state_dim, hidden_dim, action_dim, lr, gamma, epsilon, 
                         target_update, device, stm_update_freq, ltm_update_freq)

    # Performance tracking
    return_list = []
    attention_history = []  # history of mean (stm, ltm) attention weight pairs
    
    print(f"开始训练多时间尺度DQN算法(CartPole-v1)...")
    print(f"结果将保存在: {BASE_DIR}")
    # Main training loop: 10 iterations of num_episodes/10 episodes each
    for i in range(10):
        with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % i) as pbar:
            for i_episode in range(int(num_episodes / 10)):
                # Every 10 episodes within an iteration: evaluate recent
                # performance and update the curriculum difficulty
                if i_episode > 0 and i_episode % 10 == 0:
                    avg_performance = np.mean(return_list[-10:]) if len(return_list) >= 10 else 0
                    gravity, pole_length = difficulty_scheduler.update_difficulty(avg_performance)
                    env.gravity = gravity
                    env.pole_length = pole_length
                    
                    # Show the current difficulty parameters on the progress bar
                    pbar.set_postfix({
                        'gravity': f'{gravity:.2f}',
                        'pole_length': f'{pole_length:.4f}',
                        'perf': f'{avg_performance:.1f}'
                    })
                
                # Start a new episode
                episode_return = 0
                state, info = env.reset()
                done = False
                
                while not done:
                    action = agent.take_action(state)
                    next_state, reward, terminated, truncated, _ = env.step(action)
                    
                    if terminated or truncated:
                        done = True
                    
                    # Run the network again to obtain attention weights for
                    # experience routing. NOTE(review): take_action above
                    # already did a forward pass, so inference runs twice
                    # per step — presumably acceptable overhead; confirm.
                    state_tensor = torch.tensor(np.array([state]), dtype=torch.float).to(device)
                    _, attention_weights = agent.q_net(state_tensor)
                    attention_weights_mean = attention_weights.mean(dim=0).detach().cpu().numpy()
                    
                    # Store the experience together with its attention weights
                    replay_buffer.add(state, action, reward, next_state, done, attention_weights_mean)
                    state = next_state
                    episode_return += reward
                    
                    # Multi-time-scale training
                    if replay_buffer.size() > minimal_size:
                        # Fast-scale training (high frequency).
                        # NOTE(review): stm_update_count only advances when an
                        # STM update actually runs, so this gate is always
                        # satisfied; gating on agent.step_count was presumably
                        # intended — confirm before changing.
                        if agent.stm_update_count % stm_update_freq == 0:
                            stm_batch = replay_buffer.sample(batch_size // 2, 'stm')
                            if stm_batch is not None:
                                b_s, b_a, b_r, b_ns, b_d, b_attn = stm_batch
                                transition_dict = {
                                    'states': b_s, 'actions': b_a, 'rewards': b_r,
                                    'next_states': b_ns, 'dones': b_d
                                }
                                loss, attn_weights = agent.update(transition_dict, 'stm')
                                attention_history.append(attn_weights)
                        
                        # Slow-scale training (low frequency).
                        # NOTE(review): same gating concern as the STM branch.
                        if agent.ltm_update_count % ltm_update_freq == 0:
                            ltm_batch = replay_buffer.sample(batch_size // 2, 'ltm')
                            if ltm_batch is not None:
                                b_s, b_a, b_r, b_ns, b_d, b_attn = ltm_batch
                                transition_dict = {
                                    'states': b_s, 'actions': b_a, 'rewards': b_r,
                                    'next_states': b_ns, 'dones': b_d
                                }
                                loss, attn_weights = agent.update(transition_dict, 'ltm')
                                attention_history.append(attn_weights)
                        
                        # Full-buffer training (every step once warmed up)
                        full_batch = replay_buffer.sample(batch_size, 'full')
                        if full_batch is not None:
                            b_s, b_a, b_r, b_ns, b_d, b_attn = full_batch
                            transition_dict = {
                                'states': b_s, 'actions': b_a, 'rewards': b_r,
                                'next_states': b_ns, 'dones': b_d
                            }
                            loss, attn_weights = agent.update(transition_dict, 'full')
                            attention_history.append(attn_weights)
                
                # Record this episode's return
                return_list.append(episode_return)
                
                # Refresh the progress bar every 10 episodes
                if (i_episode + 1) % 10 == 0:
                    pbar.set_postfix({
                        'episode': '%d' % (num_episodes / 10 * i + i_episode + 1),
                        'return': '%.3f' % np.mean(return_list[-10:])
                    })
                
                pbar.update(1)
            
            # After each iteration: checkpoint the model and save plots
            mv_return = np.convolve(return_list, np.ones(9)/9, mode='valid')
            if len(mv_return) > 0:
                # Save the model checkpoint
                agent.save_model(os.path.join(MODEL_DIR, f"dqn_model_iteration_{i}.pth"))
                
                # Save the training curves
                plot_path = save_training_plots(return_list, mv_return, (i+1) * int(num_episodes / 10), f"iter_{i}_")
                
                # Save the multi-time-scale attention analysis
                if attention_history:
                    attn_plot_path = save_multi_scale_analysis(attention_history, f"attention_iteration_{i}.png")

    # ====================== Save final results after training ======================
    # Compute the moving-average returns (window of 9)
    mv_return = np.convolve(return_list, np.ones(9)/9, mode='valid')

    # Save the final model
    final_model_path = os.path.join(MODEL_DIR, "dqn_model_final.pth")
    agent.save_model(final_model_path)

    # Save the final training curves
    final_plot_path = save_training_plots(return_list, mv_return, num_episodes, "final_")

    # Save the run summary (hyperparameters + final metrics)
    info_path = save_training_info(hyperparams, return_list, mv_return)

    # Save the curriculum-learning difficulty progression (data + plot)
    diff_progression_path = difficulty_scheduler.save_difficulty_progression()
    diff_plot_path = difficulty_scheduler.plot_difficulty_progression()

    # Save the multi-time-scale attention analysis
    if attention_history:
        final_attn_plot_path = save_multi_scale_analysis(attention_history, "attention_final.png")
        
        # Dump the raw attention weights (NumpyEncoder handles NumPy arrays)
        attn_data_path = os.path.join(MULTI_SCALE_DIR, "attention_weights.json")
        with open(attn_data_path, 'w') as f:
            json.dump({'attention_weights': attention_history}, f, cls=NumpyEncoder, indent=4)
    else:
        final_attn_plot_path = None
        print("警告：未收集到注意力权重历史数据")

    # Print the training summary (console output is in Chinese by design)
    print(f"\n{'='*50}")
    print(f"{'训练完成！':^50}")
    print(f"{'='*50}")
    print(f"⚙️ 最终模型已保存: {final_model_path}")
    print(f"📈 训练曲线图已保存: {final_plot_path}")
    print(f"📝 训练信息已保存: {info_path}")
    print(f"📊 课程学习过程数据已保存: {diff_progression_path}")
    if final_attn_plot_path:
        print(f"🔍 多时间尺度分析图已保存: {final_attn_plot_path}")
    print(f"🏆 最后10个回合的平均回报: {np.mean(return_list[-10:]):.3f}")
    print(f"{'='*50}")

    # Build and display the final combined figure
    plt.figure(figsize=(14, 6))
    
    # Left panel: return curve with moving-average overlay
    plt.subplot(1, 2, 1)
    episodes_list = list(range(len(return_list)))
    plt.plot(episodes_list, return_list, label='return per round', color='blue', alpha=0.7)
    plt.plot(episodes_list[:len(mv_return)], mv_return, label='move_average_return', color='red', linewidth=2)
    plt.xlabel('training round')
    plt.ylabel('return')
    plt.title('Performance of multi-time scale DQN on CartPole-v1')
    plt.legend()
    plt.grid(True)
    
    # Right panel: difficulty progression (if any was recorded)
    plt.subplot(1, 2, 2)
    if difficulty_scheduler.difficulty_history:
        steps = [entry['step'] for entry in difficulty_scheduler.difficulty_history]
        gravities = [entry['gravity'] for entry in difficulty_scheduler.difficulty_history]
        pole_lengths = [entry['pole_length'] for entry in difficulty_scheduler.difficulty_history]
        performances = [entry['performance'] for entry in difficulty_scheduler.difficulty_history]
        
        plt.plot(steps, gravities, label='gravity', color='green', linestyle='-')
        plt.plot(steps, pole_lengths, label='long', color='purple', linestyle='--')
        plt.plot(steps, performances, label='perfomance', color='orange', linestyle='-.')
        plt.xlabel('step')
        plt.ylabel('value')
        plt.title('dificulty_changes')
        plt.legend()
        plt.grid(True)
    else:
        plt.text(0.5, 0.5, 'no difficulty_changes data', 
                 ha='center', va='center', fontsize=12)
    
    plt.tight_layout()
    plt.savefig(os.path.join(PLOT_DIR, "final_comprehensive_results.png"), 
                dpi=300, bbox_inches='tight')
    plt.show()
    
    # If attention history was collected, show the attention-weight figure
    if attention_history:
        plt.figure(figsize=(12, 5))
        steps = list(range(len(attention_history)))
        stm_attention = [attn[0] for attn in attention_history]
        ltm_attention = [attn[1] for attn in attention_history]
        
        # Left panel: raw attention weights
        plt.subplot(1, 2, 1)
        plt.plot(steps, stm_attention, label='fast scale attention', color='blue')
        plt.plot(steps, ltm_attention, label='slow scale attention', color='red')
        plt.xlabel('step')
        plt.ylabel('attention weight')
        plt.title('multi-time scale attention weight')
        plt.legend()
        plt.grid(True)
        
        # Right panel: smoothed weights, only when enough data for the window
        plt.subplot(1, 2, 2)
        window = 50
        if len(stm_attention) > window:
            stm_smooth = np.convolve(stm_attention, np.ones(window)/window, mode='valid')
            ltm_smooth = np.convolve(ltm_attention, np.ones(window)/window, mode='valid')
            plt.plot(range(len(stm_smooth)), stm_smooth, label='fast scale (smooth)', color='blue')
            plt.plot(range(len(ltm_smooth)), ltm_smooth, label='slow scale (smooth)', color='red')
            plt.xlabel('step')
            plt.ylabel('attention after smooth')
            plt.title('multi-time scale attention after smooth')
            plt.legend()
            plt.grid(True)
        
        plt.tight_layout()
        plt.savefig(os.path.join(MULTI_SCALE_DIR, "final_attention_weights.png"), 
                    dpi=300, bbox_inches='tight')
        plt.show()