import random
import gymnasium as gym
import numpy as np
import collections
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical, Normal
import matplotlib.pyplot as plt
import os
import json
from datetime import datetime
import copy
import sys
from scipy.optimize import differential_evolution

# Custom JSON encoder handling NumPy and PyTorch data types
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy arrays/scalars and PyTorch tensors.

    Converts them to plain Python types so ``json.dump`` can handle objects
    that the default encoder rejects.
    """

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # np.integer / np.floating are the abstract scalar bases and cover
        # every concrete width (int8..int64, uint*, float16..float64).
        # Unlike np.int_ / np.float_, they still exist in NumPy 2.0, where
        # those scalar aliases were removed.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.bool_):
            return bool(obj)
        if isinstance(obj, torch.Tensor):
            # Move to CPU first so GPU tensors serialize too.
            return obj.cpu().numpy().tolist()
        return super().default(obj)

# Directory setup for saved results
def create_save_dirs():
    """Create (if needed) the result directories and return their paths.

    Returns:
        tuple[str, ...]: (base, models, plots, curriculum, evolution,
        network_weights, multi_scale) directory paths, in that order.
    """
    base_dir = "ppo_multi_scale_evolutionary_results"
    sub_names = ["models", "plots", "curriculum", "evolution",
                 "network_weights", "multi_scale"]
    paths = [base_dir] + [os.path.join(base_dir, name) for name in sub_names]
    for directory in paths:
        # exist_ok avoids the check-then-create race of os.path.exists().
        os.makedirs(directory, exist_ok=True)
    return tuple(paths)

# Create the output directories once at import time; the paths are module-level constants.
BASE_DIR, MODEL_DIR, PLOT_DIR, CURRICULUM_DIR, EVOLUTION_DIR, NETWORK_DIR, MULTI_SCALE_DIR = create_save_dirs()

# ====================== Environment & curriculum-learning components ======================
class CustomCartPole(gym.Env):
    """CartPole-v1 wrapper whose gravity and pole length can change on the fly.

    The physics parameters are written into the underlying environment on
    every step, so a curriculum scheduler can mutate ``self.gravity`` /
    ``self.pole_length`` between episodes and have the change take effect
    immediately.
    """

    def __init__(self, gravity=9.8, pole_length=0.5):
        super().__init__()
        self.base_env = gym.make('CartPole-v1')
        self.gravity = gravity
        self.pole_length = pole_length
        self.observation_space = self.base_env.observation_space
        self.action_space = self.base_env.action_space

    def step(self, action):
        # Push the current physics parameters into the wrapped env before stepping.
        self.base_env.unwrapped.gravity = self.gravity
        self.base_env.unwrapped.length = self.pole_length
        # Gymnasium's step returns (obs, reward, terminated, truncated, info).
        return self.base_env.step(action)

    def reset(self, **kwargs):
        # Gymnasium's reset returns (obs, info).
        state, info = self.base_env.reset(**kwargs)
        return state, info

    def render(self, mode='human'):
        # BUGFIX: Gymnasium's Env.render() takes no arguments — the render
        # mode is fixed at gym.make() time — so passing `mode` raised a
        # TypeError. The parameter is kept for backward compatibility but
        # is intentionally ignored.
        return self.base_env.render()

    def close(self):
        return self.base_env.close()

class DifficultyScheduler:
    """Curriculum-learning scheduler that adapts environment difficulty.

    Gravity oscillates sinusoidally around its initial value over scheduling
    steps, while pole length grows geometrically whenever the reported
    performance exceeds a threshold.
    """

    def __init__(self,
                 initial_gravity=9.8,
                 initial_pole_length=0.5,
                 gravity_factor=0.1,
                 pole_length_factor=0.05,
                 performance_threshold=195.0):
        # Current difficulty parameters.
        self.gravity = initial_gravity
        self.pole_length = initial_pole_length
        # Baselines the sinusoid / growth are computed from.
        self.initial_gravity = initial_gravity
        self.initial_pole_length = initial_pole_length
        # Adjustment coefficients.
        self.gravity_factor = gravity_factor
        self.pole_length_factor = pole_length_factor
        self.performance_threshold = performance_threshold
        # Bookkeeping.
        self.t_step = 0
        self.difficulty_history = []

    def update_difficulty(self, performance):
        """Advance one scheduling step and return (gravity, pole_length)."""
        self.t_step += 1

        # Gravity follows a sine wave around its initial value, periodically
        # raising and lowering the challenge.
        self.gravity = self.initial_gravity * (1 + self.gravity_factor * np.sin(self.t_step / 100))

        # Pole length ratchets up only when performance clears the threshold.
        if performance > self.performance_threshold:
            self.pole_length *= (1 + self.pole_length_factor)

        self.difficulty_history.append({
            'step': self.t_step,
            'gravity': self.gravity,
            'pole_length': self.pole_length,
            'performance': performance
        })

        return self.gravity, self.pole_length

    def save_difficulty_progression(self, filename="difficulty_progression.json"):
        """Dump the difficulty history to a JSON file; returns the file path."""
        filepath = os.path.join(CURRICULUM_DIR, filename)
        with open(filepath, 'w') as f:
            json.dump(self.difficulty_history, f, indent=4, cls=NumpyEncoder)
        return filepath

    def plot_difficulty_progression(self, filename="difficulty_progression.png"):
        """Render gravity, pole-length and performance curves to a PNG.

        Returns the saved file path, or None when no history exists yet.
        """
        if not self.difficulty_history:
            return None

        steps = [e['step'] for e in self.difficulty_history]
        gravities = [e['gravity'] for e in self.difficulty_history]
        pole_lengths = [e['pole_length'] for e in self.difficulty_history]
        performances = [e['performance'] for e in self.difficulty_history]

        plt.figure(figsize=(12, 8))

        # Top panel: gravity over time.
        plt.subplot(2, 1, 1)
        plt.plot(steps, gravities, label='Gravity', color='blue')
        plt.xlabel('Training Steps')
        plt.ylabel('Gravity (m/s²)')
        plt.title('Gravity Variation Over Training')
        plt.grid(True)

        # Bottom panel: pole length together with performance.
        plt.subplot(2, 1, 2)
        plt.plot(steps, pole_lengths, label='Pole Length', color='green')
        plt.plot(steps, performances, label='Performance', color='red', linestyle='--')
        plt.xlabel('Training Steps')
        plt.ylabel('Value')
        plt.title('Pole Length and Performance Progression')
        plt.legend()
        plt.grid(True)

        plt.tight_layout()

        filepath = os.path.join(CURRICULUM_DIR, filename)
        plt.savefig(filepath, dpi=300, bbox_inches='tight')
        plt.close()

        return filepath

# ====================== PPO experience replay buffer ======================
class PPOBuffer:
    """Trajectory storage for PPO: parallel lists of transition components."""

    def __init__(self, capacity):
        # Kept only for interface compatibility; the parallel lists created by
        # clear() are what actually hold the trajectory data.
        self.buffer = collections.deque(maxlen=capacity)
        self.clear()

    def add(self, state, action, reward, next_state, done, log_prob, value):
        """Append one transition to the buffer."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
        self.next_states.append(next_state)
        self.dones.append(done)
        self.log_probs.append(log_prob)
        self.values.append(value)

    def clear(self):
        """Reset every trajectory list to empty."""
        self.states, self.actions, self.rewards = [], [], []
        self.next_states, self.dones = [], []
        self.log_probs, self.values = [], []

    def get_all_data(self):
        """Return all stored components as a tuple of lists."""
        return (self.states, self.actions, self.rewards,
                self.next_states, self.dones, self.log_probs, self.values)

    def size(self):
        """Number of stored transitions."""
        return len(self.states)

# ====================== Multi-timescale PPO network ======================
class MultiScaleActorCritic(nn.Module):
    """Actor-critic with two timescales: a fast (STM) and a slow (LTM) branch.

    Both branches read a shared feature extractor; an attention head produces
    per-sample weights that blend the STM and LTM features before the final
    actor/critic output layers.
    """

    def __init__(self, state_dim, action_dim, stm_dim=64, ltm_dim=64, is_discrete=True):
        super(MultiScaleActorCritic, self).__init__()
        self.is_discrete = is_discrete

        # Shared feature extractor.
        self.shared_fc1 = nn.Linear(state_dim, 128)
        self.shared_fc2 = nn.Linear(128, 128)

        # Fast-timescale (short-term memory) branch — immediate decisions.
        self.stm_actor_fc1 = nn.Linear(128, stm_dim)
        self.stm_actor_fc2 = nn.Linear(stm_dim, stm_dim)

        self.stm_critic_fc1 = nn.Linear(128, stm_dim)
        self.stm_critic_fc2 = nn.Linear(stm_dim, stm_dim)

        # Slow-timescale (long-term memory) branch — long-horizon strategy.
        self.ltm_actor_fc1 = nn.Linear(128, ltm_dim)
        self.ltm_actor_fc2 = nn.Linear(ltm_dim, ltm_dim)

        self.ltm_critic_fc1 = nn.Linear(128, ltm_dim)
        self.ltm_critic_fc2 = nn.Linear(ltm_dim, ltm_dim)

        # Attention heads: dynamically weigh the STM vs LTM contribution.
        self.actor_attention = nn.Linear(stm_dim + ltm_dim, 2)
        self.critic_attention = nn.Linear(stm_dim + ltm_dim, 2)

        # Output heads.
        if is_discrete:
            self.actor_output = nn.Linear(stm_dim + ltm_dim, action_dim)
        else:
            self.actor_mean = nn.Linear(stm_dim + ltm_dim, action_dim)
            self.actor_logstd = nn.Parameter(torch.zeros(1, action_dim))

        self.critic_output = nn.Linear(stm_dim + ltm_dim, 1)

    def forward(self, x):
        """Return (action_probs, state_values, actor_attn, critic_attn).

        For discrete actions `action_probs` is a softmax distribution; for
        continuous actions it is a (mean, log_std) pair.
        """
        # Model weights are float32; coerce the input to match.
        if x.dtype != torch.float32:
            x = x.to(torch.float32)

        h = F.relu(self.shared_fc2(F.relu(self.shared_fc1(x))))

        def branch(fc1, fc2):
            # Two ReLU layers applied to the shared features.
            return F.relu(fc2(F.relu(fc1(h))))

        stm_a = branch(self.stm_actor_fc1, self.stm_actor_fc2)
        stm_c = branch(self.stm_critic_fc1, self.stm_critic_fc2)
        ltm_a = branch(self.ltm_actor_fc1, self.ltm_actor_fc2)
        ltm_c = branch(self.ltm_critic_fc1, self.ltm_critic_fc2)

        # Per-sample softmax weights over the (STM, LTM) pair.
        attn_a = F.softmax(self.actor_attention(torch.cat([stm_a, ltm_a], dim=1)), dim=1)
        attn_c = F.softmax(self.critic_attention(torch.cat([stm_c, ltm_c], dim=1)), dim=1)

        # Blend each branch by its attention weight, then re-concatenate.
        actor_features = torch.cat([stm_a * attn_a[:, 0].unsqueeze(1),
                                    ltm_a * attn_a[:, 1].unsqueeze(1)], dim=1)
        critic_features = torch.cat([stm_c * attn_c[:, 0].unsqueeze(1),
                                     ltm_c * attn_c[:, 1].unsqueeze(1)], dim=1)

        if self.is_discrete:
            action_probs = F.softmax(self.actor_output(actor_features), dim=-1)
        else:
            mean = self.actor_mean(actor_features)
            action_probs = (mean, self.actor_logstd.expand_as(mean))

        state_values = self.critic_output(critic_features)

        return action_probs, state_values, attn_a, attn_c

    def act(self, state):
        """Sample an action for one unbatched state.

        Returns (action, log_prob, entropy, value) as Python scalars.
        """
        probs, value, _, _ = self.forward(torch.FloatTensor(state).unsqueeze(0))

        if self.is_discrete:
            dist = Categorical(probs)
            action = dist.sample()
            log_prob = dist.log_prob(action)
            entropy = dist.entropy()
        else:
            mean, logstd = probs
            dist = Normal(mean, logstd.exp())
            action = dist.sample()
            log_prob = dist.log_prob(action).sum(-1)
            entropy = dist.entropy().sum(-1)

        return action.item(), log_prob.item(), entropy.item(), value.item()

# ====================== Multi-timescale PPO algorithm ======================
class MultiScalePPO:
    """Multi-timescale PPO.

    Wraps a MultiScaleActorCritic and performs clipped-surrogate PPO updates.
    The same update routine can be invoked at different "timescales"
    ('stm' / 'ltm' / 'full'), which differ only in the learning-rate
    multiplier applied to the base rates.
    """

    def __init__(self, state_dim, action_dim, is_discrete=True,
                 actor_lr=3e-4, critic_lr=1e-3, gamma=0.99,
                 gae_lambda=0.95, clip_epsilon=0.2,
                 ppo_epochs=4, batch_size=64, entropy_coef=0.01,
                 stm_update_freq=1, ltm_update_freq=5,
                 device=torch.device("cpu")):

        self.state_dim = state_dim
        self.action_dim = action_dim
        self.is_discrete = is_discrete
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.clip_epsilon = clip_epsilon
        self.ppo_epochs = ppo_epochs
        self.batch_size = batch_size
        self.entropy_coef = entropy_coef
        self.device = device

        # Base learning rates, kept so update() can scale them per timescale.
        # (Previously the per-scale rates were hard-coded to 3e-4 / 1e-3 and
        # ignored these constructor arguments.)
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr

        # Timescale parameters and counters.
        self.stm_update_freq = stm_update_freq
        self.ltm_update_freq = ltm_update_freq
        self.stm_update_count = 0
        self.ltm_update_count = 0
        self.total_update_count = 0

        # Networks and optimizers. Both optimizers cover all parameters; they
        # are stepped separately for the policy and value losses.
        self.actor_critic = MultiScaleActorCritic(state_dim, action_dim, is_discrete=is_discrete).to(device)
        self.actor_optimizer = optim.Adam(self.actor_critic.parameters(), lr=actor_lr)
        self.critic_optimizer = optim.Adam(self.actor_critic.parameters(), lr=critic_lr)

        # Frozen copy of the policy used for importance sampling / evaluation.
        self.actor_critic_old = MultiScaleActorCritic(state_dim, action_dim, is_discrete=is_discrete).to(device)
        self.actor_critic_old.load_state_dict(self.actor_critic.state_dict())

    def compute_gae(self, rewards, values, next_values, dones):
        """Compute Generalized Advantage Estimation.

        Args are 1-D NumPy arrays of equal length. Returns (advantages,
        returns) where returns = advantages + values.

        NOTE(review): a True entry in `dones` is treated as a terminal state
        (no bootstrap), so truncation is not distinguished from termination —
        preserved from the original implementation.
        """
        advantages = np.zeros_like(rewards)
        last_advantage = 0

        for t in reversed(range(len(rewards))):
            if dones[t]:
                # Terminal step: do not bootstrap from the next state.
                delta = rewards[t] - values[t]
                last_advantage = delta
            else:
                delta = rewards[t] + self.gamma * next_values[t] - values[t]
                last_advantage = delta + self.gamma * self.gae_lambda * last_advantage
            advantages[t] = last_advantage

        returns = advantages + values
        return advantages, returns

    def update(self, states, actions, old_log_probs, rewards, next_states, dones, scale='full'):
        """Run `ppo_epochs` clipped-surrogate updates on one batch of data.

        `scale` selects the learning-rate multiplier: 'stm' doubles the base
        rates, 'ltm' halves them, anything else uses them unchanged.

        Returns:
            (total_loss, mean_actor_attention): last epoch's combined loss as
            a float and the batch-mean actor attention weights (NumPy, shape (2,)).
        """
        states = torch.FloatTensor(np.array(states)).to(self.device)
        actions = torch.LongTensor(actions) if self.is_discrete else torch.FloatTensor(actions)
        actions = actions.to(self.device)
        old_log_probs = torch.FloatTensor(old_log_probs).to(self.device)
        rewards = torch.FloatTensor(rewards).to(self.device)
        # BUGFIX: was torch.FFloatTensor (typo) — raised AttributeError on
        # every update call.
        next_states = torch.FloatTensor(np.array(next_states)).to(self.device)
        dones = torch.FloatTensor(dones).to(self.device)

        # Value estimates from the frozen policy (no gradients needed).
        with torch.no_grad():
            _, next_values, _, _ = self.actor_critic_old(next_states)
            next_values = next_values.squeeze().cpu().numpy()
            _, values, _, _ = self.actor_critic_old(states)
            values = values.squeeze().cpu().numpy()

        # GAE advantages and discounted returns.
        advantages, returns = self.compute_gae(
            rewards.cpu().numpy(), values, next_values, dones.cpu().numpy()
        )
        advantages = torch.FloatTensor(advantages).to(self.device)
        returns = torch.FloatTensor(returns).to(self.device)

        # Timescale-dependent learning rates, derived from the constructor's
        # base rates. Invariant across epochs, so set once before the loop.
        if scale == 'stm':
            lr_mult = 2.0      # fast timescale: higher learning rate
        elif scale == 'ltm':
            lr_mult = 0.5      # slow timescale: lower learning rate
        else:
            lr_mult = 1.0
        for param_group in self.actor_optimizer.param_groups:
            param_group['lr'] = self.actor_lr * lr_mult
        for param_group in self.critic_optimizer.param_groups:
            param_group['lr'] = self.critic_lr * lr_mult

        # Multiple PPO epochs over the same batch.
        for _ in range(self.ppo_epochs):
            # Probabilities/values under the current policy.
            new_action_probs, new_values, actor_attention, critic_attention = self.actor_critic(states)

            if self.is_discrete:
                dist = Categorical(new_action_probs)
                new_log_probs = dist.log_prob(actions)
                entropy = dist.entropy()
            else:
                action_mean, action_logstd = new_action_probs
                dist = Normal(action_mean, action_logstd.exp())
                new_log_probs = dist.log_prob(actions).sum(-1)
                entropy = dist.entropy().sum(-1)

            # Importance ratio and clipped surrogate objective.
            ratios = torch.exp(new_log_probs - old_log_probs)
            surr1 = ratios * advantages
            surr2 = torch.clamp(ratios, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages

            policy_loss = -torch.min(surr1, surr2).mean()
            value_loss = F.mse_loss(new_values.squeeze(), returns)
            entropy_loss = -entropy.mean()

            # Combined loss in the usual PPO form (reported to the caller).
            total_loss = policy_loss + 0.5 * value_loss + self.entropy_coef * entropy_loss

            # BUGFIX: the entropy bonus was computed but never backpropagated
            # (only policy_loss was), so entropy_coef had no effect on
            # training. Fold it into the actor objective.
            actor_loss = policy_loss + self.entropy_coef * entropy_loss

            # Actor step. retain_graph keeps the graph alive for the critic
            # backward below.
            self.actor_optimizer.zero_grad()
            actor_loss.backward(retain_graph=True)
            torch.nn.utils.clip_grad_norm_(self.actor_critic.parameters(), 0.5)
            self.actor_optimizer.step()

            # Critic step. NOTE(review): this backward runs through the graph
            # built before the actor step, so its gradients are w.r.t. the
            # pre-step parameters — behavior preserved from the original.
            self.critic_optimizer.zero_grad()
            value_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor_critic.parameters(), 0.5)
            self.critic_optimizer.step()

        # Sync the frozen policy with the freshly-updated one.
        self.actor_critic_old.load_state_dict(self.actor_critic.state_dict())

        # Bookkeeping counters per timescale.
        self.total_update_count += 1
        if scale == 'stm':
            self.stm_update_count += 1
        elif scale == 'ltm':
            self.ltm_update_count += 1

        return total_loss.item(), actor_attention.mean(dim=0).detach().cpu().numpy()

    def save_model(self, filepath):
        """Save network and optimizer state dicts to `filepath`."""
        torch.save({
            'actor_critic_state_dict': self.actor_critic.state_dict(),
            'actor_critic_old_state_dict': self.actor_critic_old.state_dict(),
            'actor_optimizer_state_dict': self.actor_optimizer.state_dict(),
            'critic_optimizer_state_dict': self.critic_optimizer.state_dict()
        }, filepath)

    def load_model(self, filepath):
        """Load network and optimizer state dicts written by save_model()."""
        checkpoint = torch.load(filepath)
        self.actor_critic.load_state_dict(checkpoint['actor_critic_state_dict'])
        self.actor_critic_old.load_state_dict(checkpoint['actor_critic_old_state_dict'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer_state_dict'])
        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer_state_dict'])

    def evaluate(self, env, eval_episodes=10):
        """Return the mean episode return of the frozen policy over
        `eval_episodes` episodes in `env` (Gymnasium 5-tuple step API)."""
        total_return = 0
        for _ in range(eval_episodes):
            state, _ = env.reset()
            done = False
            episode_return = 0
            while not done:
                action, _, _, _ = self.actor_critic_old.act(state)
                next_state, reward, terminated, truncated, _ = env.step(action)
                done = terminated or truncated
                episode_return += reward
                state = next_state
            total_return += episode_return

        return total_return / eval_episodes

# ====================== Differential-evolution optimization of curriculum parameters ======================
def optimize_curriculum_parameters():
    """
    Optimize curriculum-learning parameters with differential evolution.

    Each candidate parameter set is scored by running a short PPO training
    session followed by a 10-episode evaluation; the negative mean evaluation
    return is minimized.

    Returns:
        optimal_params: optimized curriculum-learning parameters
        optimal_fitness: best fitness (mean evaluation return)
    """
    # Search-space bounds for each curriculum parameter.
    bounds = [
        (5.0, 15.0),        # initial_gravity
        (0.3, 1.0),         # initial_pole_length
        (0.05, 0.3),        # gravity_factor
        (0.01, 0.1),        # pole_length_factor
        (180.0, 200.0)      # performance_threshold
    ]
    
    def objective_function(params):
        """
        Objective: minimize the negative return (i.e. maximize return).
        """
        # Defensive clipping into bounds (differential_evolution should
        # already respect them).
        clipped_params = []
        for i, param in enumerate(params):
            low, high = bounds[i]
            clipped_params.append(np.clip(param, low, high))
        
        # Unpack the candidate parameters.
        initial_gravity, initial_pole_length, gravity_factor, pole_length_factor, performance_threshold = clipped_params
        
        # Fix random seeds so each candidate is evaluated reproducibly.
        random.seed(0)
        np.random.seed(0)
        torch.manual_seed(0)
        
        # Hyperparameters for the inner training run.
        actor_lr = 3e-4
        critic_lr = 1e-3
        num_episodes = 500  # fewer episodes, for optimization efficiency
        gamma = 0.99
        gae_lambda = 0.95
        clip_epsilon = 0.2
        ppo_epochs = 4
        batch_size = 64
        entropy_coef = 0.01
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        
        # Environment and difficulty scheduler built from the candidate params.
        env = CustomCartPole()
        difficulty_scheduler = DifficultyScheduler(
            initial_gravity=initial_gravity,
            initial_pole_length=initial_pole_length,
            gravity_factor=gravity_factor,
            pole_length_factor=pole_length_factor,
            performance_threshold=performance_threshold
        )

        # Environment dimensions.
        state_dim = env.observation_space.shape[0]
        action_dim = env.action_space.n
        
        # PPO agent for this candidate.
        agent = MultiScalePPO(
            state_dim=state_dim,
            action_dim=action_dim,
            is_discrete=True,
            actor_lr=actor_lr,
            critic_lr=critic_lr,
            gamma=gamma,
            gae_lambda=gae_lambda,
            clip_epsilon=clip_epsilon,
            ppo_epochs=ppo_epochs,
            batch_size=batch_size,
            entropy_coef=entropy_coef,
            device=device
        )
        
        buffer = PPOBuffer(10000)
        return_list = []
        
        # Training loop.
        for episode in range(num_episodes):
            # Every 10 episodes, evaluate recent performance and update difficulty.
            if episode > 0 and episode % 10 == 0 and len(return_list) >= 10:
                avg_performance = np.mean(return_list[-10:])
                gravity, pole_length = difficulty_scheduler.update_difficulty(avg_performance)
                env.gravity = gravity
                env.pole_length = pole_length
            
            # Roll out one episode.
            episode_return = 0
            state, info = env.reset()
            done = False
            
            while not done:
                action, log_prob, entropy, value = agent.actor_critic.act(state)
                next_state, reward, terminated, truncated, info = env.step(action)
                done = terminated or truncated
                
                # Store the transition.
                buffer.add(state, action, reward, next_state, done, log_prob, value)
                state = next_state
                episode_return += reward
            
            return_list.append(episode_return)
            
            # PPO update once enough data has accumulated.
            if buffer.size() >= batch_size:
                states, actions, rewards, next_states, dones, log_probs, values = buffer.get_all_data()
                
                # Multi-timescale training.
                # NOTE(review): stm_update_count / ltm_update_count only change
                # inside agent.update, so these modulo conditions are always
                # true at count 0 — confirm the intended update schedule.
                if agent.stm_update_count % agent.stm_update_freq == 0:
                    agent.update(states, actions, log_probs, rewards, next_states, dones, 'stm')
                
                if agent.ltm_update_count % agent.ltm_update_freq == 0:
                    agent.update(states, actions, log_probs, rewards, next_states, dones, 'ltm')
                
                # Full-scale training pass.
                agent.update(states, actions, log_probs, rewards, next_states, dones, 'full')
                
                buffer.clear()
        
        # Evaluate final performance.
        eval_returns = []
        for _ in range(10):  # 10 evaluation episodes
            episode_return = 0
            state, info = env.reset()
            done = False
            
            while not done:
                action, _, _, _ = agent.actor_critic.act(state)
                next_state, reward, terminated, truncated, info = env.step(action)
                done = terminated or truncated
                state = next_state
                episode_return += reward
            
            eval_returns.append(episode_return)
        
        # Negative mean evaluation return (differential evolution minimizes).
        return -np.mean(eval_returns)
    
    # Run the differential-evolution search.
    print("开始使用差分进化算法优化课程学习参数...")
    result = differential_evolution(
        objective_function,
        bounds,
        strategy='best1bin',
        popsize=5,          # population size
        maxiter=5,          # number of iterations
        tol=0.01,
        mutation=(0.5, 1),
        recombination=0.7,
        seed=42,
        disp=True
    )
    
    print("优化完成!")
    print("最优参数:", result.x)
    print("最佳适应度（平均回报）:", -result.fun)
    
    # Persist the optimization result.
    optimization_result = {
        'optimal_parameters': result.x.tolist(),
        'optimal_fitness': -result.fun,
        'optimization_bounds': bounds,
        'timestamp': datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    }
    
    result_file = os.path.join(EVOLUTION_DIR, "curriculum_optimization_result.json")
    with open(result_file, 'w') as f:
        json.dump(optimization_result, f, indent=4, cls=NumpyEncoder)
    
    return result.x, -result.fun

# ====================== Utility functions ======================
def moving_average(data, window_size):
    """Return the simple moving average of `data` over `window_size` points."""
    kernel = np.full(window_size, 1.0 / window_size)
    return np.convolve(data, kernel, mode='valid')

def save_training_plots(return_list, mv_return, episode, prefix=""):
    """Plot raw and moving-average returns and save the figure as a PNG.

    Returns the path of the written image file.
    """
    plt.figure(figsize=(12, 6))

    # Left panel: raw per-episode returns.
    plt.subplot(1, 2, 1)
    plt.plot(list(range(len(return_list))), return_list)
    plt.xlabel('Episodes')
    plt.ylabel('Returns')
    plt.title('Multi-Scale PPO on CartPole-v1')

    # Right panel: moving average, only when there is data for it.
    if len(mv_return) > 0:
        plt.subplot(1, 2, 2)
        plt.plot(list(range(len(mv_return))), mv_return)
        plt.xlabel('Episodes')
        plt.ylabel('Moving Average Returns')
        plt.title('Moving Average (window=9)')

    plt.tight_layout()

    filename = f"{prefix}training_progress_episode_{episode}.png"
    filepath = os.path.join(PLOT_DIR, filename)
    plt.savefig(filepath, dpi=300, bbox_inches='tight')
    plt.close()

    return filepath

def save_training_info(hyperparams, return_list, mv_return):
    """Write hyperparameters and summary statistics to training_info.json.

    Returns the path of the written JSON file.
    """
    summary = {
        'last_10_episodes_mean_return': np.mean(return_list[-10:]) if len(return_list) >= 10 else 0,
        'last_10_episodes_mean_mv_return': np.mean(mv_return[-10:]) if len(mv_return) >= 10 else 0,
        'max_return': np.max(return_list) if return_list else 0,
        'min_return': np.min(return_list) if return_list else 0,
        'total_episodes': len(return_list)
    }
    info = {
        'timestamp': datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
        'hyperparameters': hyperparams,
        'final_results': summary
    }

    info_file = os.path.join(BASE_DIR, "training_info.json")
    with open(info_file, 'w') as f:
        json.dump(info, f, indent=4, cls=NumpyEncoder)

    return info_file

def save_multi_scale_analysis(attention_history, filename="multi_scale_analysis.png"):
    """Visualize raw and smoothed STM/LTM attention weights over training
    steps, save the figure into MULTI_SCALE_DIR, and return its path.

    *attention_history* is a sequence of per-step weight pairs; index 0 is
    taken as the STM weight and index 1 as the LTM weight.
    """
    stm_attention = [weights[0] for weights in attention_history]
    ltm_attention = [weights[1] for weights in attention_history]
    steps = list(range(len(attention_history)))

    plt.figure(figsize=(12, 6))

    # Left panel: raw attention traces.
    plt.subplot(1, 2, 1)
    plt.plot(steps, stm_attention, label='STM Attention', color='blue')
    plt.plot(steps, ltm_attention, label='LTM Attention', color='red')
    plt.xlabel('Training Steps')
    plt.ylabel('Attention Weights')
    plt.title('Multi-Scale Attention Weights')
    plt.legend()
    plt.grid(True)

    # Right panel: moving-average smoothed traces; drawn only when there
    # are more samples than the smoothing window.
    plt.subplot(1, 2, 2)
    window = 50
    if len(stm_attention) > window:
        kernel = np.ones(window) / window
        stm_smooth = np.convolve(stm_attention, kernel, mode='valid')
        ltm_smooth = np.convolve(ltm_attention, kernel, mode='valid')
        plt.plot(range(len(stm_smooth)), stm_smooth, label='STM (Smoothed)', color='blue')
        plt.plot(range(len(ltm_smooth)), ltm_smooth, label='LTM (Smoothed)', color='red')
        plt.xlabel('Training Steps')
        plt.ylabel('Smoothed Attention')
        plt.title('Smoothed Multi-Scale Attention')
        plt.legend()
        plt.grid(True)

    plt.tight_layout()

    filepath = os.path.join(MULTI_SCALE_DIR, filename)
    plt.savefig(filepath, dpi=300, bbox_inches='tight')
    plt.close()

    return filepath

# ====================== 主训练程序 ======================
if __name__ == "__main__":
    # ---- Core PPO hyperparameters ----
    actor_lr = 3e-4
    critic_lr = 1e-3
    num_episodes = 500
    gamma = 0.99          # discount factor
    gae_lambda = 0.95     # GAE smoothing factor
    clip_epsilon = 0.2    # PPO clipping range
    ppo_epochs = 4
    batch_size = 64
    entropy_coef = 0.01
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    # ---- Multi-time-scale parameters ----
    stm_update_freq = 1    # fast-scale (short-term memory) update frequency
    ltm_update_freq = 5    # slow-scale (long-term memory) update frequency

    # Ask interactively whether curriculum parameters should be tuned
    # with differential evolution before training starts.
    use_evolutionary = input("是否使用差分进化算法优化课程学习参数? (y/n): ").lower().strip() == 'y'

    if use_evolutionary:
        print("使用差分进化算法优化课程学习参数...")
        optimal_params, optimal_fitness = optimize_curriculum_parameters()

        # Unpack the optimizer's best curriculum parameter vector.
        initial_gravity, initial_pole_length, gravity_factor, pole_length_factor, performance_threshold = optimal_params

        print(f"优化结果:")
        print(f"  初始重力: {initial_gravity:.2f}")
        print(f"  初始杆长: {initial_pole_length:.4f}")
        print(f"  重力因子: {gravity_factor:.3f}")
        print(f"  杆长因子: {pole_length_factor:.3f}")
        print(f"  性能阈值: {performance_threshold:.1f}")
        print(f"  预期回报: {optimal_fitness:.1f}")
    else:
        # Fall back to hand-picked default curriculum parameters.
        initial_gravity = 9.8
        initial_pole_length = 0.5
        gravity_factor = 0.1
        pole_length_factor = 0.05
        performance_threshold = 195.0
        print("使用默认课程学习参数")

    # Record hyperparameters for the run's training_info.json.
    hyperparams = {
        'actor_learning_rate': actor_lr,       
        'critic_learning_rate': critic_lr,
        'num_episodes': num_episodes,
        'gamma': gamma,
        'gae_lambda': gae_lambda,
        'clip_epsilon': clip_epsilon,
        'ppo_epochs': ppo_epochs,
        'batch_size': batch_size,
        'entropy_coef': entropy_coef,
        'device': str(device),
        'stm_update_freq': stm_update_freq,
        'ltm_update_freq': ltm_update_freq,
        'curriculum_learning': True,
        'evolutionary_optimization': use_evolutionary,
        'curriculum_params': {
            'initial_gravity': initial_gravity,
            'initial_pole_length': initial_pole_length,
            'gravity_factor': gravity_factor,
            'pole_length_factor': pole_length_factor,
            'performance_threshold': performance_threshold
        }
    }

    # Build the environment and the curriculum difficulty scheduler.
    env = CustomCartPole()
    difficulty_scheduler = DifficultyScheduler(
        initial_gravity=initial_gravity,
        initial_pole_length=initial_pole_length,
        gravity_factor=gravity_factor,
        pole_length_factor=pole_length_factor,
        performance_threshold=performance_threshold
    )

    # Seed all RNG sources for reproducibility.
    random.seed(0)
    np.random.seed(0)
    env.reset(seed=0)
    torch.manual_seed(0)

    # Environment dimensions.
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n

    # Performance bookkeeping.
    return_list = []
    attention_history = []  # attention-weight pairs recorded at each agent update

    # Initialize the multi-time-scale PPO agent.
    agent = MultiScalePPO(
        state_dim=state_dim,
        action_dim=action_dim,
        is_discrete=True,
        actor_lr=actor_lr,
        critic_lr=critic_lr,
        gamma=gamma,
        gae_lambda=gae_lambda,
        clip_epsilon=clip_epsilon,
        ppo_epochs=ppo_epochs,
        batch_size=batch_size,
        entropy_coef=entropy_coef,
        stm_update_freq=stm_update_freq,
        ltm_update_freq=ltm_update_freq,
        device=device
    )

    buffer = PPOBuffer(10000)

    print(f"开始训练多时间尺度PPO算法(CartPole-v1 + 课程学习)...")
    print(f"结果将保存在: {BASE_DIR}")

    # ---- Training loop ----
    for episode in range(num_episodes):
        # Every 10 episodes, re-evaluate recent performance and let the
        # scheduler adjust the environment's difficulty (gravity/pole length).
        if episode > 0 and episode % 10 == 0 and len(return_list) >= 10:
            avg_performance = np.mean(return_list[-10:])
            gravity, pole_length = difficulty_scheduler.update_difficulty(avg_performance)
            env.gravity = gravity
            env.pole_length = pole_length

            print(f"Episode {episode}: Updated gravity to {gravity:.2f}, pole length to {pole_length:.4f}")

        # Roll out one episode.
        episode_return = 0
        state, info = env.reset()
        done = False

        while not done:
            action, log_prob, entropy, value = agent.actor_critic.act(state)
            next_state, reward, terminated, truncated, info = env.step(action)
            done = terminated or truncated

            # Probe the actor's attention weights (diagnostic only; this
            # value is not stored — attention_history is appended to during
            # agent updates below). no_grad avoids building an autograd
            # graph for this extra forward pass.
            with torch.no_grad():
                state_tensor = torch.tensor(np.array([state]), dtype=torch.float32).to(device)
                _, _, actor_attention, _ = agent.actor_critic(state_tensor)
                attention_weights = actor_attention.mean(dim=0).detach().cpu().numpy()

            # Store the transition and advance.
            buffer.add(state, action, reward, next_state, done, log_prob, value)
            state = next_state
            episode_return += reward

        return_list.append(episode_return)

        # Run PPO updates once enough experience has been collected.
        if buffer.size() >= batch_size:
            states, actions, rewards, next_states, dones, log_probs, values = buffer.get_all_data()

            # Fast-scale (STM) pass.
            if agent.stm_update_count % agent.stm_update_freq == 0:
                loss, attn_weights = agent.update(states, actions, log_probs, rewards, next_states, dones, 'stm')
                attention_history.append(attn_weights)

            # Slow-scale (LTM) pass.
            # BUG FIX: was `agent.update(sttes, ...)` — undefined name that
            # raised NameError whenever this branch fired.
            if agent.ltm_update_count % agent.ltm_update_freq == 0:
                loss, attn_weights = agent.update(states, actions, log_probs, rewards, next_states, dones, 'ltm')
                attention_history.append(attn_weights)

            # Combined full-scale pass.
            loss, attn_weights = agent.update(states, actions, log_probs, rewards, next_states, dones, 'full')
            attention_history.append(attn_weights)

            buffer.clear()

        # Progress report every 10 episodes.
        if episode % 10 == 0:
            avg_return = np.mean(return_list[-10:]) if len(return_list) >= 10 else np.mean(return_list)
            print(f"Episode {episode}: Return = {episode_return}, Avg Return (last 10) = {avg_return:.2f}")

    # ---- Post-training: persist all results ----
    # Moving average needs at least as many points as the window (9).
    if len(return_list) >= 9:
        mv_return = moving_average(return_list, 9)
    else:
        mv_return = return_list.copy()

    # Final model weights.
    final_model_path = os.path.join(MODEL_DIR, "ppo_model_final.pth")
    agent.save_model(final_model_path)

    # Final training curves.
    final_plot_path = save_training_plots(return_list, mv_return, num_episodes, "final_")

    # Hyperparameters + summary statistics.
    info_path = save_training_info(hyperparams, return_list, mv_return)

    # Curriculum-learning progression data and plot.
    diff_progression_path = difficulty_scheduler.save_difficulty_progression()
    diff_plot_path = difficulty_scheduler.plot_difficulty_progression()

    # Multi-time-scale attention analysis.
    if attention_history:
        final_attn_plot_path = save_multi_scale_analysis(attention_history, "attention_final.png")

        # Raw attention-weight data alongside the plot.
        attn_data_path = os.path.join(MULTI_SCALE_DIR, "attention_weights.json")
        with open(attn_data_path, 'w') as f:
            json.dump({'attention_weights': attention_history}, f, indent=4, cls=NumpyEncoder)

    print(f"\n训练完成！")
    print(f"最终模型已保存: {final_model_path}")
    print(f"训练曲线图已保存: {final_plot_path}")
    print(f"训练信息已保存: {info_path}")
    print(f"课程学习过程数据已保存: {diff_progression_path}")
    print(f"课程学习可视化图已保存: {diff_plot_path}")
    if attention_history:
        print(f"多时间尺度分析图已保存: {final_attn_plot_path}")
    print(f"最后10个回合的平均回报: {np.mean(return_list[-10:]):.3f}")

    # Display the final return curves on screen.
    episodes_list = list(range(len(return_list)))
    plt.figure(figsize=(14, 6))

    plt.subplot(1, 2, 1)
    plt.plot(episodes_list, return_list)
    plt.xlabel('Episodes')
    plt.ylabel('Returns')
    plt.title('Multi-Scale PPO on CartPole-v1 (Curriculum Learning)')

    plt.subplot(1, 2, 2)
    mv_episodes = list(range(len(mv_return)))
    plt.plot(mv_episodes, mv_return)
    plt.xlabel('Episodes')
    plt.ylabel('Moving Average Returns')
    plt.title('Moving Average (window=9)')

    plt.tight_layout()
    plt.savefig(os.path.join(PLOT_DIR, "final_training_results.png"), dpi=300, bbox_inches='tight')
    plt.show()

    # If attention history exists, display how the weights evolved.
    if attention_history:
        plt.figure(figsize=(12, 5))
        steps = list(range(len(attention_history)))
        stm_attention = [attn[0] for attn in attention_history]
        ltm_attention = [attn[1] for attn in attention_history]

        plt.subplot(1, 2, 1)
        plt.plot(steps, stm_attention, label='STM Attention', color='blue')
        plt.plot(steps, ltm_attention, label='LTM Attention', color='red')
        plt.xlabel('Training Steps')
        plt.ylabel('Attention Weight')
        plt.title('Multi-Scale Attention Weights')
        plt.legend()
        plt.grid(True)

        plt.subplot(1, 2, 2)
        window = 50
        if len(stm_attention) > window:
            stm_smooth = np.convolve(stm_attention, np.ones(window)/window, mode='valid')
            ltm_smooth = np.convolve(ltm_attention, np.ones(window)/window, mode='valid')
            plt.plot(range(len(stm_smooth)), stm_smooth, label='STM (Smoothed)', color='blue')
            plt.plot(range(len(ltm_smooth)), ltm_smooth, label='LTM (Smoothed)', color='red')
            plt.xlabel('Training Steps')
            plt.ylabel('Smoothed Attention')
            plt.title('Smoothed Multi-Scale Attention')
            plt.legend()
            plt.grid(True)

        plt.tight_layout()
        plt.savefig(os.path.join(MULTI_SCALE_DIR, "final_attention_weights.png"), 
                    dpi=300, bbox_inches='tight')
        plt.show()