import random
import gymnasium as gym
import numpy as np
import collections
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Normal
import matplotlib.pyplot as plt
import os
import json
from datetime import datetime
import copy
import sys
from scipy.optimize import differential_evolution

# Custom JSON encoder that handles NumPy and PyTorch data types
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy scalars/arrays and torch.Tensors.

    Converts them to plain Python types so ``json.dump`` can emit them;
    anything else falls through to the base encoder (which raises
    TypeError for unsupported objects).
    """

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Check bool before the numeric branches so it serializes as
        # true/false rather than 0/1.
        elif isinstance(obj, np.bool_):
            return bool(obj)
        # np.integer / np.floating are the abstract scalar base classes and
        # cover every concrete width. The original enumerated concrete
        # aliases including np.int_ and np.float_, which were removed in
        # NumPy 2.0 and crash with AttributeError there.
        elif isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, torch.Tensor):
            return obj.cpu().numpy().tolist()
        return super().default(obj)

# Create the directories used to save results
def create_save_dirs():
    """Create (idempotently) the directory tree used to save results.

    Returns a 7-tuple of paths: (base, models, plots, curriculum,
    evolution, network_weights, multi_scale). All directories are
    guaranteed to exist on return.
    """
    base_dir = "sac_multi_scale_evolutionary_results"
    sub_names = ["models", "plots", "curriculum", "evolution",
                 "network_weights", "multi_scale"]
    dirs = [base_dir] + [os.path.join(base_dir, name) for name in sub_names]

    for directory in dirs:
        # exist_ok avoids the check-then-create race of os.path.exists
        # and makes repeated calls safe.
        os.makedirs(directory, exist_ok=True)

    return tuple(dirs)

# Create the output directories once at import time (module-level side effect).
BASE_DIR, MODEL_DIR, PLOT_DIR, CURRICULUM_DIR, EVOLUTION_DIR, NETWORK_DIR, MULTI_SCALE_DIR = create_save_dirs()

# ====================== Environment and curriculum-learning components ======================
class CustomCartPole(gym.Env):
    """CartPole wrapper whose gravity and pole length can be changed at runtime.

    NOTE(review): gymnasium's CartPole caches derived quantities such as
    ``polemass_length`` at construction; overriding ``length`` per step may
    not update them — confirm against the CartPole implementation.
    """
    def __init__(self, gravity=9.8, pole_length=0.5, render_mode=None):
        super().__init__()
        # gymnasium fixes the render backend at construction time via
        # render_mode; it cannot be chosen later at render() calls.
        self.base_env = gym.make('CartPole-v1', render_mode=render_mode)
        self.gravity = gravity
        self.pole_length = pole_length
        self.observation_space = self.base_env.observation_space
        self.action_space = self.base_env.action_space

    def step(self, action):
        # Push the current curriculum parameters into the wrapped physics
        # before every transition.
        self.base_env.unwrapped.gravity = self.gravity
        self.base_env.unwrapped.length = self.pole_length
        return self.base_env.step(action)

    def reset(self, **kwargs):
        """Reset the wrapped env; returns (state, info) per gymnasium API."""
        state, info = self.base_env.reset(**kwargs)
        return state, info

    def render(self, mode='human'):
        """Render the wrapped env.

        Fix: gymnasium's ``Env.render()`` takes no arguments — the mode is
        fixed by ``render_mode`` at ``make()`` time. The original passed
        ``mode`` positionally, which raises TypeError under gymnasium.
        The ``mode`` parameter is kept (and ignored) for backward
        compatibility with existing callers.
        """
        return self.base_env.render()

    def close(self):
        return self.base_env.close()

class DifficultyScheduler:
    """Curriculum-learning core: schedules environment difficulty over time.

    Gravity oscillates sinusoidally around its initial value as the
    internal step counter advances; the pole length grows multiplicatively
    whenever the reported performance exceeds the threshold.
    """

    def __init__(self,
                 initial_gravity=9.8,
                 initial_pole_length=0.5,
                 gravity_factor=0.1,
                 pole_length_factor=0.05,
                 performance_threshold=195.0):
        self.gravity = initial_gravity
        self.pole_length = initial_pole_length
        self.initial_gravity = initial_gravity
        self.initial_pole_length = initial_pole_length
        self.gravity_factor = gravity_factor
        self.pole_length_factor = pole_length_factor
        self.performance_threshold = performance_threshold
        # Monotonic step counter driving the gravity oscillation phase.
        self.t_step = 0
        # One dict per update, recording the parameters that were applied.
        self.difficulty_history = []

    def update_difficulty(self, performance):
        """Advance one scheduling step; return (gravity, pole_length)."""
        self.t_step += 1
        phase = self.t_step / 100
        self.gravity = self.initial_gravity * (1 + self.gravity_factor * np.sin(phase))
        if performance > self.performance_threshold:
            self.pole_length *= (1 + self.pole_length_factor)
        record = {
            'step': self.t_step,
            'gravity': self.gravity,
            'pole_length': self.pole_length,
            'performance': performance,
        }
        self.difficulty_history.append(record)
        return self.gravity, self.pole_length

    def save_difficulty_progression(self, filename="difficulty_progression.json"):
        """Dump the difficulty history as JSON; return the file path."""
        filepath = os.path.join(CURRICULUM_DIR, filename)
        with open(filepath, 'w') as f:
            json.dump(self.difficulty_history, f, indent=4, cls=NumpyEncoder)
        return filepath

# ====================== Experience replay buffer ======================
class ReplayBuffer:
    """Experience replay buffer with an enforced capacity.

    Fix: the original allocated a ``collections.deque(maxlen=capacity)``
    but never used it — transitions were appended to unbounded plain
    lists, so ``capacity`` was silently ignored and memory grew without
    bound. Each transition component is now stored in its own bounded
    deque; once full, the oldest transition is evicted.
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self._reset_storage()

    def _reset_storage(self):
        """(Re)create the index-aligned, capacity-bounded component deques."""
        self.states = collections.deque(maxlen=self.capacity)
        self.actions = collections.deque(maxlen=self.capacity)
        self.rewards = collections.deque(maxlen=self.capacity)
        self.next_states = collections.deque(maxlen=self.capacity)
        self.dones = collections.deque(maxlen=self.capacity)
        self.attention_weights = collections.deque(maxlen=self.capacity)

    def add(self, state, action, reward, next_state, done, attention_weights=None):
        """Append one transition; evicts the oldest entry when at capacity."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
        self.next_states.append(next_state)
        self.dones.append(done)
        # Default to a uniform short-/long-term attention split.
        self.attention_weights.append(attention_weights if attention_weights is not None else [0.5, 0.5])

    def sample(self, batch_size, scale_type='full'):
        """Sample a batch without replacement; None if not enough data yet.

        Returns (states, actions, rewards, next_states, dones,
        attention_weights) as NumPy arrays. ``scale_type`` is currently
        unused and kept for interface compatibility.
        """
        if len(self.states) < batch_size:
            return None

        indices = np.random.choice(len(self.states), batch_size, replace=False)
        states = np.array(self.states)[indices]
        actions = np.array(self.actions)[indices]
        rewards = np.array(self.rewards)[indices]
        next_states = np.array(self.next_states)[indices]
        dones = np.array(self.dones)[indices]
        attention_weights = np.array(self.attention_weights)[indices]

        return states, actions, rewards, next_states, dones, attention_weights

    def clear(self):
        """Drop every stored transition."""
        self._reset_storage()

    def size(self):
        """Number of transitions currently stored."""
        return len(self.states)

# ====================== Multi-timescale SAC networks ======================
class MultiScalePolicyNetwork(nn.Module):
    """Multi-timescale policy network (actor).

    A shared trunk feeds a fast (STM) and a slow (LTM) feature stream;
    a learned two-way softmax attention mixes them before the Gaussian
    policy heads (mean + clamped log-std).
    """
    def __init__(self, state_dim, action_dim, hidden_dim=256, 
                 stm_dim=64, ltm_dim=64, log_std_min=-20, log_std_max=2):
        super(MultiScalePolicyNetwork, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        
        # Shared feature-extraction trunk.
        self.shared_net = nn.Sequential(
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU()
        )
        
        # Fast-timescale stream (short-term memory).
        self.stm_net = nn.Sequential(
            nn.Linear(hidden_dim, stm_dim),
            nn.ReLU()
        )
        
        # Slow-timescale stream (long-term memory).
        self.ltm_net = nn.Sequential(
            nn.Linear(hidden_dim, ltm_dim),
            nn.ReLU()
        )
        
        # Two-way attention over the concatenated streams.
        self.attention = nn.Linear(stm_dim + ltm_dim, 2)
        
        # Gaussian policy heads.
        self.mean_layer = nn.Linear(stm_dim + ltm_dim, action_dim)
        self.log_std_layer = nn.Linear(stm_dim + ltm_dim, action_dim)
        
    def forward(self, state):
        """Return (mean, clamped log_std, attention_weights) for a batch."""
        shared_features = self.shared_net(state)
        
        stm_features = self.stm_net(shared_features)
        ltm_features = self.ltm_net(shared_features)
        
        # Mix the two streams with a learned softmax attention.
        combined = torch.cat([stm_features, ltm_features], dim=1)
        attention_weights = F.softmax(self.attention(combined), dim=1)
        
        stm_weighted = stm_features * attention_weights[:, 0].unsqueeze(1)
        ltm_weighted = ltm_features * attention_weights[:, 1].unsqueeze(1)
        
        features = torch.cat([stm_weighted, ltm_weighted], dim=1)
        mean = self.mean_layer(features)
        log_std = self.log_std_layer(features)
        # Clamp log-std for numerical stability of exp().
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
        
        return mean, log_std, attention_weights
    
    def sample(self, state):
        """Sample an action with the reparameterization trick.

        Fix: this method was a byte-for-byte duplicate of evaluate() with
        the default epsilon; it now delegates so the duplicated math
        cannot drift out of sync. Kept as a separate entry point for API
        compatibility.
        """
        return self.evaluate(state)
    
    def evaluate(self, state, epsilon=1e-6):
        """Sample (action, log_prob, attention_weights) for a batch.

        Applies the tanh change-of-variables correction to the Gaussian
        log-probability; ``epsilon`` guards the log against tanh saturation.
        """
        mean, log_std, attention_weights = self.forward(state)
        std = log_std.exp()
        normal = Normal(mean, std)
        x_t = normal.rsample()  # reparameterized sample (keeps gradients)
        action = torch.tanh(x_t)
        
        # log pi(a|s) = log N(x) - log(1 - tanh(x)^2)
        log_prob = normal.log_prob(x_t)
        log_prob -= torch.log(1 - action.pow(2) + epsilon)
        log_prob = log_prob.sum(1, keepdim=True)
        
        return action, log_prob, attention_weights

class MultiScaleQNetwork(nn.Module):
    """Multi-timescale Q network (critic).

    The (state, action) pair runs through a shared trunk, splits into a
    fast (STM) and a slow (LTM) feature stream, is mixed by a learned
    two-way attention, and is mapped to a scalar Q value.
    """

    def __init__(self, state_dim, action_dim, hidden_dim=256, stm_dim=64, ltm_dim=64):
        super(MultiScaleQNetwork, self).__init__()

        # Shared trunk over the concatenated (state, action) input.
        self.shared_net = nn.Sequential(
            nn.Linear(state_dim + action_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU()
        )

        # Fast-timescale stream (short-term memory).
        self.stm_net = nn.Sequential(
            nn.Linear(hidden_dim, stm_dim),
            nn.ReLU()
        )

        # Slow-timescale stream (long-term memory).
        self.ltm_net = nn.Sequential(
            nn.Linear(hidden_dim, ltm_dim),
            nn.ReLU()
        )

        # Two-way attention over the concatenated streams.
        self.attention = nn.Linear(stm_dim + ltm_dim, 2)

        # Scalar Q head.
        self.q_value = nn.Linear(stm_dim + ltm_dim, 1)

    def forward(self, state, action):
        """Return (q_value, attention_weights) for a batch of pairs."""
        trunk_out = self.shared_net(torch.cat([state, action], dim=1))

        fast = self.stm_net(trunk_out)
        slow = self.ltm_net(trunk_out)

        # Softmax attention decides how much each stream contributes.
        weights = F.softmax(self.attention(torch.cat([fast, slow], dim=1)), dim=1)

        # Scale each stream by its attention weight, then score.
        gated = torch.cat([fast * weights[:, :1], slow * weights[:, 1:]], dim=1)
        return self.q_value(gated), weights

# ====================== Multi-timescale SAC algorithm ======================
class MultiScaleSAC:
    """Multi-timescale SAC agent: actor, twin critics, auto temperature.

    NOTE(review): the training scripts build this with
    ``action_dim = env.action_space.n`` (Discrete CartPole) while the
    actor emits continuous tanh actions — confirm the intended
    environment/action handling before relying on end-to-end training.
    """
    def __init__(self, state_dim, action_dim, 
                 actor_lr=3e-4, critic_lr=3e-4, alpha_lr=3e-4,
                 gamma=0.99, tau=0.005, target_entropy=None,
                 stm_update_freq=1, ltm_update_freq=5,
                 device=torch.device("cpu")):
        
        self.device = device
        self.gamma = gamma
        self.tau = tau
        # Default target entropy is -|A|, the standard SAC heuristic.
        self.target_entropy = target_entropy if target_entropy is not None else -action_dim
        
        # Timescale bookkeeping (counters only; scheduling is external).
        self.stm_update_freq = stm_update_freq
        self.ltm_update_freq = ltm_update_freq
        self.stm_update_count = 0
        self.ltm_update_count = 0
        self.total_update_count = 0
        
        # Policy network (actor).
        self.actor = MultiScalePolicyNetwork(state_dim, action_dim).to(device)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)
        
        # Twin Q networks to reduce overestimation.
        self.critic1 = MultiScaleQNetwork(state_dim, action_dim).to(device)
        self.critic2 = MultiScaleQNetwork(state_dim, action_dim).to(device)
        self.critic1_optimizer = optim.Adam(self.critic1.parameters(), lr=critic_lr)
        self.critic2_optimizer = optim.Adam(self.critic2.parameters(), lr=critic_lr)
        
        # Target Q networks start as copies of the online critics.
        self.target_critic1 = MultiScaleQNetwork(state_dim, action_dim).to(device)
        self.target_critic2 = MultiScaleQNetwork(state_dim, action_dim).to(device)
        self.target_critic1.load_state_dict(self.critic1.state_dict())
        self.target_critic2.load_state_dict(self.critic2.state_dict())
        
        # Automatic entropy (temperature) tuning.
        self.log_alpha = torch.tensor(np.log(0.1), dtype=torch.float32, requires_grad=True, device=device)
        self.alpha_optimizer = optim.Adam([self.log_alpha], lr=alpha_lr)
        # Fix: self.alpha used to be first assigned only at the END of
        # update(), so the very first update() crashed with AttributeError
        # when it read self.alpha while building the critic target.
        self.alpha = self.log_alpha.exp()
    
    def select_action(self, state, evaluate=False):
        """Return (action, attention_weights) for a single state.

        In evaluate mode the deterministic tanh(mean) action is returned;
        otherwise an action is sampled from the reparameterized policy.
        """
        state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
        
        if evaluate:
            with torch.no_grad():
                mean, log_std, attention_weights = self.actor(state)
                action = torch.tanh(mean)
            # Fix: previously returned the raw attention tensor (bound to
            # `_`); now returns a NumPy array, matching the other branch.
            return action.cpu().numpy()[0], attention_weights.cpu().numpy()[0]
        else:
            with torch.no_grad():
                action, _, attention_weights = self.actor.sample(state)
            return action.cpu().numpy()[0], attention_weights.cpu().numpy()[0]
    
    def update(self, batch, scale='full'):
        """One SAC update step on a sampled batch.

        Returns (mean critic loss, actor loss, alpha loss, mean actor
        attention weights). ``scale`` only tags which counter increments.
        """
        states, actions, rewards, next_states, dones, _ = batch
        
        states = torch.FloatTensor(states).to(self.device)
        actions = torch.FloatTensor(actions).to(self.device)
        rewards = torch.FloatTensor(rewards).unsqueeze(1).to(self.device)
        next_states = torch.FloatTensor(next_states).to(self.device)
        dones = torch.FloatTensor(dones).unsqueeze(1).to(self.device)
        
        # --- Critic update: soft Bellman target with entropy bonus ---
        with torch.no_grad():
            next_actions, next_log_probs, _ = self.actor.evaluate(next_states)
            next_q1, _ = self.target_critic1(next_states, next_actions)
            next_q2, _ = self.target_critic2(next_states, next_actions)
            next_q = torch.min(next_q1, next_q2) - self.alpha * next_log_probs
            target_q = rewards + (1 - dones) * self.gamma * next_q
        
        # Critic 1
        current_q1, attention_weights1 = self.critic1(states, actions)
        critic1_loss = F.mse_loss(current_q1, target_q)
        self.critic1_optimizer.zero_grad()
        critic1_loss.backward()
        self.critic1_optimizer.step()
        
        # Critic 2
        current_q2, attention_weights2 = self.critic2(states, actions)
        critic2_loss = F.mse_loss(current_q2, target_q)
        self.critic2_optimizer.zero_grad()
        critic2_loss.backward()
        self.critic2_optimizer.step()
        
        # --- Actor update: maximize Q minus entropy penalty ---
        new_actions, new_log_probs, actor_attention = self.actor.evaluate(states)
        new_q1, _ = self.critic1(states, new_actions)
        new_q2, _ = self.critic2(states, new_actions)
        new_q = torch.min(new_q1, new_q2)
        
        actor_loss = (self.alpha * new_log_probs - new_q).mean()
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        
        # --- Temperature (alpha) update toward the target entropy ---
        alpha_loss = -(self.log_alpha * (new_log_probs + self.target_entropy).detach()).mean()
        self.alpha_optimizer.zero_grad()
        alpha_loss.backward()
        self.alpha_optimizer.step()
        self.alpha = self.log_alpha.exp()
        
        # --- Polyak (soft) update of the target critics ---
        for param, target_param in zip(self.critic1.parameters(), self.target_critic1.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
        
        for param, target_param in zip(self.critic2.parameters(), self.target_critic2.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
        
        # Update the per-timescale counters.
        self.total_update_count += 1
        if scale == 'stm':
            self.stm_update_count += 1
        elif scale == 'ltm':
            self.ltm_update_count += 1
        
        return (critic1_loss.item() + critic2_loss.item()) / 2, actor_loss.item(), \
               alpha_loss.item(), actor_attention.mean(dim=0).detach().cpu().numpy()
    
    def save_model(self, filepath):
        """Save all networks, optimizers and log_alpha to one checkpoint."""
        torch.save({
            'actor_state_dict': self.actor.state_dict(),
            'critic1_state_dict': self.critic1.state_dict(),
            'critic2_state_dict': self.critic2.state_dict(),
            'target_critic1_state_dict': self.target_critic1.state_dict(),
            'target_critic2_state_dict': self.target_critic2.state_dict(),
            'log_alpha': self.log_alpha,
            'actor_optimizer_state_dict': self.actor_optimizer.state_dict(),
            'critic1_optimizer_state_dict': self.critic1_optimizer.state_dict(),
            'critic2_optimizer_state_dict': self.critic2_optimizer.state_dict(),
            'alpha_optimizer_state_dict': self.alpha_optimizer.state_dict()
        }, filepath)
    
    def load_model(self, filepath):
        """Restore a checkpoint written by save_model()."""
        # map_location lets CPU-only hosts load checkpoints saved on GPU.
        checkpoint = torch.load(filepath, map_location=self.device)
        self.actor.load_state_dict(checkpoint['actor_state_dict'])
        self.critic1.load_state_dict(checkpoint['critic1_state_dict'])
        self.critic2.load_state_dict(checkpoint['critic2_state_dict'])
        self.target_critic1.load_state_dict(checkpoint['target_critic1_state_dict'])
        self.target_critic2.load_state_dict(checkpoint['target_critic2_state_dict'])
        self.log_alpha = checkpoint['log_alpha']
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer_state_dict'])
        self.critic1_optimizer.load_state_dict(checkpoint['critic1_optimizer_state_dict'])
        self.critic2_optimizer.load_state_dict(checkpoint['critic2_optimizer_state_dict'])
        self.alpha_optimizer.load_state_dict(checkpoint['alpha_optimizer_state_dict'])
        self.alpha = self.log_alpha.exp()

# ====================== Differential evolution for curriculum-learning parameters ======================
def optimize_curriculum_parameters():
    """Optimize curriculum-learning parameters with differential evolution.

    Each candidate parameter vector is scored by running a short SAC
    training and evaluation inside the objective. Returns
    (optimal_params, mean_eval_return); results are also written as JSON
    under EVOLUTION_DIR.
    """
    # Search bounds, one (low, high) pair per curriculum parameter.
    bounds = [
        (5.0, 15.0),        # initial_gravity
        (0.3, 1.0),         # initial_pole_length
        (0.05, 0.3),        # gravity_factor
        (0.01, 0.1),        # pole_length_factor
        (180.0, 200.0)      # performance_threshold
    ]
    
    def objective_function(params):
        """Objective: negative mean evaluation return (DE minimizes)."""
        # Clip the candidate back into bounds before use.
        clipped_params = []
        for i, param in enumerate(params):
            low, high = bounds[i]
            clipped_params.append(np.clip(param, low, high))
        
        initial_gravity, initial_pole_length, gravity_factor, pole_length_factor, performance_threshold = clipped_params
        
        # Fix seeds so every candidate is compared on equal footing.
        random.seed(0)
        np.random.seed(0)
        torch.manual_seed(0)
        
        # Hyperparameters for the inner training run.
        actor_lr = 3e-4
        critic_lr = 3e-4
        alpha_lr = 3e-4
        num_episodes = 500
        gamma = 0.99
        tau = 0.005
        batch_size = 256
        buffer_size = 100000
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        
        # Create the environment and difficulty scheduler for this candidate.
        env = CustomCartPole()
        difficulty_scheduler = DifficultyScheduler(
            initial_gravity=initial_gravity,
            initial_pole_length=initial_pole_length,
            gravity_factor=gravity_factor,
            pole_length_factor=pole_length_factor,
            performance_threshold=performance_threshold
        )

        replay_buffer = ReplayBuffer(buffer_size)
        state_dim = env.observation_space.shape[0]
        # NOTE(review): CartPole's action space is Discrete(2); the SAC
        # actor emits continuous tanh actions, so passing them to
        # env.step() looks inconsistent — confirm intended behavior.
        action_dim = env.action_space.n
        agent = MultiScaleSAC(state_dim, action_dim, 
                             actor_lr=actor_lr, critic_lr=critic_lr, alpha_lr=alpha_lr,
                             gamma=gamma, tau=tau, device=device)

        return_list = []
        
        # Training loop.
        for episode in range(num_episodes):
            # Every 10 episodes, update difficulty from recent performance.
            if episode > 0 and episode % 10 == 0 and len(return_list) >= 10:
                avg_performance = np.mean(return_list[-10:])
                gravity, pole_length = difficulty_scheduler.update_difficulty(avg_performance)
                env.gravity = gravity
                env.pole_length = pole_length
            
            # Roll out one episode.
            episode_return = 0
            state, _ = env.reset()
            done = False
            
            while not done:
                action, attention_weights = agent.select_action(state)
                next_state, reward, terminated, truncated, _ = env.step(action)
                done = terminated or truncated
                
                replay_buffer.add(state, action, reward, next_state, done, attention_weights)
                state = next_state
                episode_return += reward
            
            return_list.append(episode_return)
            
            # One SAC update per episode, once the buffer has enough data.
            if replay_buffer.size() > batch_size:
                batch = replay_buffer.sample(batch_size)
                agent.update(batch)
        
        # Evaluate final performance over 10 deterministic episodes.
        eval_returns = []
        for _ in range(10):
            episode_return = 0
            state, _ = env.reset()
            done = False
            
            while not done:
                action, _ = agent.select_action(state, evaluate=True)
                next_state, reward, terminated, truncated, _ = env.step(action)
                done = terminated or truncated
                state = next_state
                episode_return += reward
            
            eval_returns.append(episode_return)
        
        return -np.mean(eval_returns)
    
    # Run the differential-evolution search.
    print("开始使用差分进化算法优化课程学习参数...")
    result = differential_evolution(
        objective_function,
        bounds,
        strategy='best1bin',
        popsize=5,
        maxiter=5,
        tol=0.01,
        mutation=(0.5, 1),
        recombination=0.7,
        seed=42,
        disp=True
    )
    
    print("优化完成!")
    print("最优参数:", result.x)
    print("最佳适应度（平均回报）:", -result.fun)
    
    # Persist the optimization result.
    optimization_result = {
        'optimal_parameters': result.x.tolist(),
        'optimal_fitness': -result.fun,
        'optimization_bounds': bounds,
        'timestamp': datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    }
    
    result_file = os.path.join(EVOLUTION_DIR, "curriculum_optimization_result.json")
    with open(result_file, 'w') as f:
        json.dump(optimization_result, f, indent=4, cls=NumpyEncoder)
    
    return result.x, -result.fun

# ====================== Utility functions ======================
def moving_average(data, window_size):
    """Return the valid-mode moving average of `data` over `window_size`."""
    kernel = np.ones(window_size) / window_size
    return np.convolve(data, kernel, mode='valid')

def save_training_plots(return_list, mv_return, episode, prefix=""):
    """Save the training curves for one checkpoint and return the file path.

    Fix: the original opened a *second* figure for the moving average and
    then saved/closed only the current (last) figure — so whenever
    `mv_return` was non-empty the raw-returns figure was silently dropped
    and leaked (never closed). Both curves now live in a single figure
    (two rows when a moving average exists), which is saved and closed.
    """
    episodes_list = list(range(len(return_list)))
    has_mv = len(mv_return) > 0

    if has_mv:
        fig, (ax_ret, ax_mv) = plt.subplots(2, 1, figsize=(12, 12))
    else:
        fig, ax_ret = plt.subplots(1, 1, figsize=(12, 6))

    # Raw per-episode returns.
    ax_ret.plot(episodes_list, return_list)
    ax_ret.set_xlabel('Episodes')
    ax_ret.set_ylabel('Returns')
    ax_ret.set_title('Multi-Scale SAC on CartPole-v1')
    ax_ret.grid(True)

    # Smoothed returns (only when a moving average was computed).
    if has_mv:
        ax_mv.plot(list(range(len(mv_return))), mv_return)
        ax_mv.set_xlabel('Episodes')
        ax_mv.set_ylabel('Moving Average Returns')
        ax_mv.set_title('Moving Average (window=100)')
        ax_mv.grid(True)

    filename = f"{prefix}training_progress_episode_{episode}.png"
    filepath = os.path.join(PLOT_DIR, filename)
    fig.savefig(filepath, dpi=300, bbox_inches='tight')
    # Close the exact figure we created so nothing leaks.
    plt.close(fig)

    return filepath

def save_training_info(hyperparams, return_list, mv_return):
    """Write hyperparameters plus summary statistics to training_info.json.

    Returns the path of the written file.
    """
    def _tail_mean(values):
        # Mean of the last 10 entries, or 0 when fewer than 10 exist.
        return np.mean(values[-10:]) if len(values) >= 10 else 0

    summary = {
        'last_10_episodes_mean_return': _tail_mean(return_list),
        'last_10_episodes_mean_mv_return': _tail_mean(mv_return),
        'max_return': np.max(return_list) if return_list else 0,
        'min_return': np.min(return_list) if return_list else 0,
        'total_episodes': len(return_list),
    }
    info = {
        'timestamp': datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
        'hyperparameters': hyperparams,
        'final_results': summary,
    }

    info_file = os.path.join(BASE_DIR, "training_info.json")
    with open(info_file, 'w') as f:
        json.dump(info, f, indent=4, cls=NumpyEncoder)

    return info_file

# ====================== Main training program ======================
if __name__ == "__main__":
    # Hyperparameters
    actor_lr = 3e-4
    critic_lr = 3e-4
    alpha_lr = 3e-4
    num_episodes = 1000
    gamma = 0.99
    tau = 0.005
    batch_size = 256
    buffer_size = 100000
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    
    # Multi-timescale parameters
    stm_update_freq = 1    # fast-scale update frequency
    ltm_update_freq = 5    # slow-scale update frequency

    # Ask whether to optimize curriculum parameters with differential evolution
    use_evolutionary = input("是否使用差分进化算法优化课程学习参数? (y/n): ").lower().strip() == 'y'
    
    if use_evolutionary:
        print("使用差分进化算法优化课程学习参数...")
        optimal_params, optimal_fitness = optimize_curriculum_parameters()
        
        # Unpack the optimal parameters
        initial_gravity, initial_pole_length, gravity_factor, pole_length_factor, performance_threshold = optimal_params
        
        print(f"优化结果:")
        print(f"  初始重力: {initial_gravity:.2f}")
        print(f"  初始杆长: {initial_pole_length:.4f}")
        print(f"  重力因子: {gravity_factor:.3f}")
        print(f"  杆长因子: {pole_length_factor:.3f}")
        print(f"  性能阈值: {performance_threshold:.1f}")
        print(f"  预期回报: {optimal_fitness:.1f}")
    else:
        # Fall back to default curriculum parameters
        initial_gravity = 9.8
        initial_pole_length = 0.5
        gravity_factor = 0.1
        pole_length_factor = 0.05
        performance_threshold = 195.0
        print("使用默认课程学习参数")

    # Record hyperparameters for the training-info dump
    hyperparams = {
        'actor_learning_rate': actor_lr,
        'critic_learning_rate': critic_lr,
        'alpha_learning_rate': alpha_lr,
        'num_episodes': num_episodes,
        'gamma': gamma,
        'tau': tau,
        'batch_size': batch_size,
        'buffer_size': buffer_size,
        'device': str(device),
        'stm_update_freq': stm_update_freq,
        'ltm_update_freq': ltm_update_freq,
        'curriculum_learning': True,
        'evolutionary_optimization': use_evolutionary,
        'curriculum_params': {
            'initial_gravity': initial_gravity,
            'initial_pole_length': initial_pole_length,
            'gravity_factor': gravity_factor,
            'pole_length_factor': pole_length_factor,
            'performance_threshold': performance_threshold
        }
    }

    # Create the environment and difficulty scheduler
    env = CustomCartPole()
    difficulty_scheduler = DifficultyScheduler(
        initial_gravity=initial_gravity,
        initial_pole_length=initial_pole_length,
        gravity_factor=gravity_factor,
        pole_length_factor=pole_length_factor,
        performance_threshold=performance_threshold
    )

    # Seed RNGs for reproducibility
    random.seed(0)
    np.random.seed(0)
    env.reset(seed=0)
    torch.manual_seed(0)

    # Environment dimensions.
    # NOTE(review): CartPole's action space is Discrete(2); the SAC actor
    # emits continuous tanh actions, so env.step(action) with an array
    # looks inconsistent — confirm intended action handling.
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    
    # Performance bookkeeping
    return_list = []
    attention_history = []
    
    # Initialize the SAC agent
    agent = MultiScaleSAC(
        state_dim=state_dim,
        action_dim=action_dim,
        actor_lr=actor_lr,
        critic_lr=critic_lr,
        alpha_lr=alpha_lr,
        gamma=gamma,
        tau=tau,
        stm_update_freq=stm_update_freq,
        ltm_update_freq=ltm_update_freq,
        device=device
    )

    replay_buffer = ReplayBuffer(buffer_size)
    
    print(f"开始训练多时间尺度SAC算法(CartPole-v1 + 课程学习)...")
    print(f"结果将保存在: {BASE_DIR}")

    # Training loop
    for episode in range(num_episodes):
        # Every 10 episodes, evaluate recent performance and update difficulty
        if episode > 0 and episode % 10 == 0 and len(return_list) >= 10:
            avg_performance = np.mean(return_list[-10:])
            gravity, pole_length = difficulty_scheduler.update_difficulty(avg_performance)
            env.gravity = gravity
            env.pole_length = pole_length
            
            print(f"Episode {episode}: Updated gravity to {gravity:.2f}, pole length to {pole_length:.4f}")
        
        # Roll out one episode
        episode_return = 0
        state, _ = env.reset()
        done = False
        
        while not done:
            action, attention_weights = agent.select_action(state)
            next_state, reward, terminated, truncated, _ = env.step(action)
            done = terminated or truncated
            
            replay_buffer.add(state, action, reward, next_state, done, attention_weights)
            state = next_state
            episode_return += reward
        
        return_list.append(episode_return)
        # NOTE: this records only the attention weights from the episode's
        # final step, not a per-step history.
        attention_history.append(attention_weights)
        
        # One SAC update per episode, once the buffer has enough data
        if replay_buffer.size() > batch_size:
            batch = replay_buffer.sample(batch_size)
            critic_loss, actor_loss, alpha_loss, attn_weights = agent.update(batch)
            
            if episode % 100 == 0:
                print(f"Episode {episode}: Return = {episode_return}, "
                      f"Critic Loss = {critic_loss:.4f}, Actor Loss = {actor_loss:.4f}, "
                      f"Alpha Loss = {alpha_loss:.4f}")
        
        # Progress logging
        if episode % 100 == 0:
            avg_return = np.mean(return_list[-100:]) if len(return_list) >= 100 else np.mean(return_list)
            print(f"Episode {episode}: Return = {episode_return}, Avg Return (last 100) = {avg_return:.2f}")
    
    # Save final results after training
    if len(return_list) >= 100:
        mv_return = moving_average(return_list, 100)
    else:
        mv_return = return_list.copy()

    # Save the final model
    final_model_path = os.path.join(MODEL_DIR, "sac_model_final.pth")
    agent.save_model(final_model_path)

    # Save the final training curves
    final_plot_path = save_training_plots(return_list, mv_return, num_episodes, "final_")

    # Save training information
    info_path = save_training_info(hyperparams, return_list, mv_return)

    # Save the curriculum-learning progression data
    diff_progression_path = difficulty_scheduler.save_difficulty_progression()

    print(f"\n训练完成！")
    print(f"最终模型已保存: {final_model_path}")
    print(f"训练曲线图已保存: {final_plot_path}")
    print(f"训练信息已保存: {info_path}")
    print(f"课程学习过程数据已保存: {diff_progression_path}")
    print(f"最后10个回合的平均回报: {np.mean(return_list[-10:]):.3f}")

    # Show the final plot
    episodes_list = list(range(len(return_list)))
    plt.figure(figsize=(12, 6))
    plt.plot(episodes_list, return_list)
    plt.xlabel('Episodes')
    plt.ylabel('Returns')
    plt.title('Multi-Scale SAC on CartPole-v1 (Curriculum Learning)')
    plt.grid(True)
    plt.savefig(os.path.join(PLOT_DIR, "final_training_results.png"), dpi=300, bbox_inches='tight')
    plt.show()