import random
import gymnasium as gym
import numpy as np
import collections
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import os
import json
from datetime import datetime
import sys
from scipy.optimize import differential_evolution

# 自定义JSON编码器处理NumPy和PyTorch数据类型
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that handles NumPy and PyTorch data types.

    Converts ndarrays/tensors to nested lists and NumPy scalars to native
    Python int/float/bool so ``json.dump`` can serialize them.
    """

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # np.integer / np.floating are the abstract scalar bases; they cover
        # every concrete width and, unlike the np.int_/np.float_ aliases
        # removed in NumPy 2.0, keep working on current NumPy versions.
        elif isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.bool_):
            return bool(obj)
        elif isinstance(obj, torch.Tensor):
            # Move to CPU first in case the tensor lives on a GPU.
            return obj.cpu().numpy().tolist()
        return super().default(obj)

# 创建保存结果的目录
def create_save_dirs():
    """Create the output directory tree for models, plots and logs.

    Returns:
        Tuple of (base, models, plots, curriculum, evolution, network_weights)
        directory paths. Existing directories are left untouched.
    """
    base_dir = "dqn_evolutionary_complete_results"
    sub_dirs = [os.path.join(base_dir, name)
                for name in ("models", "plots", "curriculum", "evolution", "network_weights")]

    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    for directory in [base_dir] + sub_dirs:
        os.makedirs(directory, exist_ok=True)

    return (base_dir, *sub_dirs)

# Create the output directories once at import time; these paths are used as
# module-level constants by the saving helpers throughout this file.
BASE_DIR, MODEL_DIR, PLOT_DIR, CURRICULUM_DIR, EVOLUTION_DIR, NETWORK_DIR = create_save_dirs()

# ====================== 课程学习核心组件 ======================
class CustomCartPole(gym.Env):
    """CartPole wrapper whose gravity and pole length can change at runtime.

    Exposes the classic 4-tuple ``step`` API (state, reward, done, info) on
    top of gymnasium's 5-tuple API, because the training loops below expect
    a single ``done`` flag.
    """

    def __init__(self, gravity=9.8, pole_length=0.5):
        super().__init__()
        self.base_env = gym.make('CartPole-v1')
        self.gravity = gravity
        self.pole_length = pole_length
        self.observation_space = self.base_env.observation_space
        self.action_space = self.base_env.action_space

    def step(self, action):
        # Push the current physics parameters into the underlying env.
        unwrapped = self.base_env.unwrapped
        unwrapped.gravity = self.gravity
        unwrapped.length = self.pole_length
        # CartPole precomputes masspole * length in __init__; refresh it so
        # the new pole length actually affects the dynamics.
        unwrapped.polemass_length = unwrapped.masspole * self.pole_length
        next_state, reward, terminated, truncated, info = self.base_env.step(action)
        # Collapse gymnasium's terminated/truncated pair into one flag.
        done = terminated or truncated
        return next_state, reward, done, info

    def reset(self, **kwargs):
        # gymnasium reset returns (observation, info).
        state, info = self.base_env.reset(**kwargs)
        return state, info

    def render(self, mode='human'):
        # gymnasium's render() takes no arguments (the render mode is fixed
        # at gym.make time); ``mode`` is kept only for API compatibility.
        return self.base_env.render()

    def close(self):
        return self.base_env.close()

class DifficultyScheduler:
    """Curriculum-learning core: schedules environment difficulty over time."""

    def __init__(self,
                 initial_gravity=9.8,
                 initial_pole_length=0.5,
                 gravity_factor=0.1,
                 pole_length_factor=0.05,
                 performance_threshold=195.0):
        # Current and initial physics parameters.
        self.gravity = initial_gravity
        self.pole_length = initial_pole_length
        self.initial_gravity = initial_gravity
        self.initial_pole_length = initial_pole_length

        # Adjustment coefficients and the performance gate for harder poles.
        self.gravity_factor = gravity_factor
        self.pole_length_factor = pole_length_factor
        self.performance_threshold = performance_threshold

        # Bookkeeping.
        self.t_step = 0
        self.difficulty_history = []

    def update_difficulty(self, performance):
        """Advance one scheduling step; return the new (gravity, pole_length)."""
        self.t_step += 1

        # Gravity oscillates sinusoidally around its initial value so the
        # agent periodically faces harder dynamics.
        phase = np.sin(self.t_step / 100)
        self.gravity = self.initial_gravity * (1 + self.gravity_factor * phase)

        # Pole length grows geometrically, but only once recent performance
        # clears the threshold.
        if performance > self.performance_threshold:
            self.pole_length *= (1 + self.pole_length_factor)

        self.difficulty_history.append({
            'step': self.t_step,
            'gravity': self.gravity,
            'pole_length': self.pole_length,
            'performance': performance,
        })

        return self.gravity, self.pole_length

    def save_difficulty_progression(self, filename="difficulty_progression.json"):
        """Dump the difficulty history as JSON and return the file path."""
        filepath = os.path.join(CURRICULUM_DIR, filename)
        with open(filepath, 'w') as f:
            json.dump(self.difficulty_history, f, indent=4, cls=NumpyEncoder)
        return filepath

    def plot_difficulty_progression(self, filename="difficulty_progression.png"):
        """Plot gravity / pole-length / performance curves; return image path."""
        if not self.difficulty_history:
            return None

        history = self.difficulty_history
        steps = [h['step'] for h in history]
        gravities = [h['gravity'] for h in history]
        pole_lengths = [h['pole_length'] for h in history]
        performances = [h['performance'] for h in history]

        plt.figure(figsize=(12, 8))

        # Top panel: gravity over training steps.
        plt.subplot(2, 1, 1)
        plt.plot(steps, gravities, label='Gravity', color='blue')
        plt.xlabel('Training Steps')
        plt.ylabel('Gravity (m/s²)')
        plt.title('Gravity Variation Over Training')
        plt.grid(True)

        # Bottom panel: pole length and performance on a shared axis.
        plt.subplot(2, 1, 2)
        plt.plot(steps, pole_lengths, label='Pole Length', color='green')
        plt.plot(steps, performances, label='Performance', color='red', linestyle='--')
        plt.xlabel('Training Steps')
        plt.ylabel('Value')
        plt.title('Pole Length and Performance Progression')
        plt.legend()
        plt.grid(True)

        plt.tight_layout()

        filepath = os.path.join(CURRICULUM_DIR, filename)
        plt.savefig(filepath, dpi=300, bbox_inches='tight')
        plt.close()

        return filepath

class ReplayBuffer:
    """Fixed-capacity FIFO store of (s, a, r, s', done) transitions."""

    def __init__(self, capacity):
        # deque silently drops the oldest transition once capacity is hit.
        self.buffer = collections.deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        """Append one transition to the buffer."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Draw a uniform random mini-batch of ``batch_size`` transitions."""
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        return np.array(states), actions, rewards, np.array(next_states), dones

    def size(self):
        """Number of transitions currently stored."""
        return len(self.buffer)

# ====================== DQN算法实现 ======================
class QNet(nn.Module):
    """Q-network with a single hidden layer."""

    def __init__(self, state_dim, hidden_dim, action_dim):
        super(QNet, self).__init__()
        # Attribute names fc1/fc2 are part of the checkpoint format
        # (state_dict keys) and must not change.
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        """Map a batch of states to per-action Q-values."""
        hidden = torch.relu(self.fc1(x))  # ReLU activation on the hidden layer
        return self.fc2(hidden)
 
class DQN:
    """Deep Q-Network agent with a target network and epsilon-greedy policy."""

    def __init__(self, state_dim, hidden_dim, action_dim, learning_rate, gamma,
                 epsilon, target_update, device):
        self.action_dim = action_dim
        self.q_net = QNet(state_dim, hidden_dim, action_dim).to(device)
        # Separate target network, synchronized every `target_update` updates.
        self.target_q_net = QNet(state_dim, hidden_dim, action_dim).to(device)
        self.optimizer = torch.optim.Adam(self.q_net.parameters(), lr=learning_rate)
        self.gamma = gamma                  # discount factor
        self.epsilon = epsilon              # exploration rate
        self.target_update = target_update  # target-net sync period (in updates)
        self.count = 0                      # number of update() calls so far
        self.device = device

    def take_action(self, state):
        """Pick an action with an epsilon-greedy policy."""
        if np.random.random() < self.epsilon:
            action = np.random.randint(self.action_dim)
        else:
            # Cast to float32 so the input dtype matches the network weights.
            state = torch.tensor(np.array([state]), dtype=torch.float32).to(self.device)
            action = self.q_net(state).argmax().item()
        return action

    def update(self, transition_dict):
        """Run one gradient step on a mini-batch of transitions."""
        states = torch.tensor(transition_dict['states'], dtype=torch.float32).to(self.device)
        # gather() requires int64 indices; make the dtype explicit.
        actions = torch.tensor(transition_dict['actions'], dtype=torch.int64).view(-1, 1).to(self.device)
        rewards = torch.tensor(transition_dict['rewards'], dtype=torch.float32).view(-1, 1).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'], dtype=torch.float32).to(self.device)
        dones = torch.tensor(transition_dict['dones'], dtype=torch.float32).view(-1, 1).to(self.device)

        # Q-values of the taken actions.
        q_values = self.q_net(states).gather(1, actions)
        # Max next-state Q-value from the (frozen) target network.
        max_next_q_values = self.target_q_net(next_states).max(1)[0].view(-1, 1)
        # TD target; (1 - dones) zeroes the bootstrap term at episode ends.
        q_targets = rewards + self.gamma * max_next_q_values * (1 - dones)

        # F.mse_loss already averages over the batch, so no extra mean needed.
        dqn_loss = F.mse_loss(q_values, q_targets)

        self.optimizer.zero_grad()  # PyTorch accumulates gradients by default
        dqn_loss.backward()
        self.optimizer.step()

        if self.count % self.target_update == 0:
            self.target_q_net.load_state_dict(self.q_net.state_dict())
        self.count += 1

    def save_model(self, filepath):
        """Save Q-net, target-net and optimizer state to ``filepath``."""
        torch.save({
            'q_net_state_dict': self.q_net.state_dict(),
            'target_q_net_state_dict': self.target_q_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict()
        }, filepath)

    def load_model(self, filepath):
        """Load a checkpoint saved by :meth:`save_model`.

        ``map_location`` remaps tensors saved on another device (e.g. a GPU
        checkpoint loaded on a CPU-only machine) onto ``self.device``;
        without it ``torch.load`` fails when the saving device is absent.
        """
        checkpoint = torch.load(filepath, map_location=self.device)
        self.q_net.load_state_dict(checkpoint['q_net_state_dict'])
        self.target_q_net.load_state_dict(checkpoint['target_q_net_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

# ====================== 工具函数 ======================
def moving_average(data, window_size):
    """Simple moving average of ``data`` over a window of ``window_size``."""
    kernel = np.ones(window_size) / window_size
    return np.convolve(data, kernel, mode='valid')

def save_training_plots(return_list, mv_return, episode, prefix=""):
    """Save a two-panel figure of raw and moving-average returns.

    Returns:
        Path of the saved PNG inside PLOT_DIR.
    """
    plt.figure(figsize=(12, 6))

    # Left panel: raw per-episode returns.
    plt.subplot(1, 2, 1)
    plt.plot(list(range(len(return_list))), return_list)
    plt.xlabel('Episodes')
    plt.ylabel('Returns')
    plt.title('DQN on CartPole-v1')

    # Right panel: smoothed returns.
    plt.subplot(1, 2, 2)
    plt.plot(list(range(len(mv_return))), mv_return)
    plt.xlabel('Episodes')
    plt.ylabel('Moving Average Returns')
    plt.title('Moving Average (window=9)')

    plt.tight_layout()

    # Write the image and release the figure.
    filepath = os.path.join(PLOT_DIR, f"{prefix}training_progress_episode_{episode}.png")
    plt.savefig(filepath, dpi=300, bbox_inches='tight')
    plt.close()

    return filepath

def save_training_info(hyperparams, return_list, mv_return):
    """Write hyperparameters and summary statistics to training_info.json.

    Returns:
        Path of the JSON file inside BASE_DIR.
    """
    # Summary statistics; fall back to 0 when too few episodes were run.
    summary = {
        'last_10_episodes_mean_return': np.mean(return_list[-10:]) if len(return_list) >= 10 else 0,
        'last_10_episodes_mean_mv_return': np.mean(mv_return[-10:]) if len(mv_return) >= 10 else 0,
        'max_return': np.max(return_list) if return_list else 0,
        'min_return': np.min(return_list) if return_list else 0,
        'total_episodes': len(return_list),
    }
    info = {
        'timestamp': datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
        'hyperparameters': hyperparams,
        'final_results': summary,
    }

    info_file = os.path.join(BASE_DIR, "training_info.json")
    with open(info_file, 'w') as f:
        json.dump(info, f, indent=4, cls=NumpyEncoder)

    return info_file

# ====================== 演化算法优化 ======================
def train_dqn_with_curriculum(params, num_episodes=100, eval_episodes=10, show_progress=False):
    """
    Train a DQN under the given curriculum parameters and return its fitness.

    This is the inner evaluation loop of the evolutionary search: each call
    trains a fresh agent from scratch with fixed seeds and scores it.

    Args:
        params: five curriculum parameters, in order:
            (initial_gravity, initial_pole_length, gravity_factor,
             pole_length_factor, performance_threshold).
        num_episodes: total training episodes, split into 10 iterations.
        eval_episodes: number of greedy-ish evaluation episodes at the end.
        show_progress: whether to display a tqdm progress bar.

    Returns:
        Mean return over ``eval_episodes`` evaluation episodes.
    """
    # Unpack the curriculum parameters.
    initial_gravity, initial_pole_length, gravity_factor, pole_length_factor, performance_threshold = params
    
    # Fix all RNG seeds so every candidate is evaluated reproducibly.
    random.seed(0)
    np.random.seed(0)
    torch.manual_seed(0)
    
    # DQN hyperparameters (kept in sync with main_training).
    lr = 2e-3
    hidden_dim = 128
    gamma = 0.98
    epsilon = 0.01
    target_update = 10
    buffer_size = 10000
    minimal_size = 500
    batch_size = 64
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    
    # Build the adjustable environment and its difficulty scheduler.
    env = CustomCartPole()
    difficulty_scheduler = DifficultyScheduler(
        initial_gravity=initial_gravity,
        initial_pole_length=initial_pole_length,
        gravity_factor=gravity_factor,
        pole_length_factor=pole_length_factor,
        performance_threshold=performance_threshold
    )

    replay_buffer = ReplayBuffer(buffer_size)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    agent = DQN(state_dim, hidden_dim, action_dim, lr, gamma, epsilon, target_update, device)

    return_list = []
    
    # Training loop: 10 iterations of num_episodes/10 episodes each.
    for i in range(10):
        if show_progress:
            pbar = tqdm(total=int(num_episodes / 10), desc=f'Iteration {i+1}/10')
            
        for i_episode in range(int(num_episodes / 10)):
            # Every 10 episodes, score recent performance and step difficulty.
            if i_episode > 0 and i_episode % 10 == 0:
                avg_performance = np.mean(return_list[-10:]) if len(return_list) >= 10 else 0
                gravity, pole_length = difficulty_scheduler.update_difficulty(avg_performance)
                env.gravity = gravity
                env.pole_length = pole_length
                
                if show_progress:
                    pbar.set_postfix({
                        'gravity': f'{gravity:.2f}',
                        'pole_length': f'{pole_length:.4f}',
                        'perf': f'{avg_performance:.1f}'
                    })
            
            # Start a new episode.
            episode_return = 0
            state, info = env.reset()
            done = False
            
            while not done:
                action = agent.take_action(state)
                next_state, reward, done, info = env.step(action)
                
                replay_buffer.add(state, action, reward, next_state, done)
                state = next_state
                episode_return += reward
                
                # Only learn once the buffer holds enough transitions.
                if replay_buffer.size() > minimal_size:
                    b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                    transition_dict = {
                        'states': b_s,
                        'actions': b_a,
                        'next_states': b_ns,
                        'rewards': b_r,
                        'dones': b_d
                    }
                    agent.update(transition_dict)
            
            return_list.append(episode_return)
            
            if show_progress and (i_episode + 1) % 10 == 0:
                pbar.set_postfix({
                    'episode': i_episode + 1,
                    'return': '%.3f' % np.mean(return_list[-10:])
                })
                pbar.update(10)
        
        if show_progress:
            pbar.close()
    
    # Final evaluation: run the trained agent (still epsilon-greedy) and
    # average the episode returns.
    eval_returns = []
    for _ in range(eval_episodes):
        episode_return = 0
        state, info = env.reset()
        done = False
        
        while not done:
            action = agent.take_action(state)
            next_state, reward, done, info = env.step(action)
            state = next_state
            episode_return += reward
        
        eval_returns.append(episode_return)
    
    # Mean evaluation return is the fitness signal.
    return np.mean(eval_returns)

def objective_function(params):
    """
    Differential-evolution objective: negative mean return (to be minimized).
    """
    # One (low, high) pair per curriculum parameter.
    bounds = [
        (5.0, 15.0),        # initial_gravity
        (0.3, 1.0),         # initial_pole_length
        (0.05, 0.3),        # gravity_factor
        (0.01, 0.1),        # pole_length_factor
        (180.0, 200.0)      # performance_threshold
    ]

    # Clip each candidate parameter into its bounds.
    clipped_params = [np.clip(param, low, high)
                      for param, (low, high) in zip(params, bounds)]

    # Minimizing the negative return is equivalent to maximizing the return.
    fitness = -train_dqn_with_curriculum(clipped_params)

    # Log this candidate and its (positive) fitness to a timestamped file.
    evolution_data = {
        'params': clipped_params,
        'fitness': -fitness,
        'timestamp': datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    }
    evolution_file = os.path.join(EVOLUTION_DIR, f"evolution_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json")
    with open(evolution_file, 'w') as f:
        json.dump(evolution_data, f, indent=4, cls=NumpyEncoder)

    return fitness

def optimize_with_evolutionary_algorithm():
    """
    Tune the curriculum parameters with SciPy's differential evolution.

    Returns:
        (best_params, best_fitness) where fitness is the mean return.
    """
    # Search space: one (low, high) pair per curriculum parameter.
    bounds = [
        (5.0, 15.0),        # initial_gravity
        (0.3, 1.0),         # initial_pole_length
        (0.05, 0.3),        # gravity_factor
        (0.01, 0.1),        # pole_length_factor
        (180.0, 200.0)      # performance_threshold
    ]

    print("开始演化算法优化...")
    print("参数边界:", bounds)

    # Deliberately small population / few iterations: every objective
    # evaluation trains a full DQN, so this is expensive.
    result = differential_evolution(
        objective_function,
        bounds,
        strategy='best1bin',
        popsize=5,
        maxiter=5,
        tol=0.01,
        mutation=(0.5, 1),
        recombination=0.7,
        seed=42,
        disp=True
    )

    best_params = result.x
    best_fitness = -result.fun  # objective returned negated return

    print("优化完成!")
    print("最优参数:", best_params)
    print("最佳适应度（平均回报）:", best_fitness)

    # Persist the optimization outcome.
    optimization_result = {
        'optimal_parameters': best_params.tolist(),
        'optimal_fitness': best_fitness,
        'optimization_bounds': bounds,
        'timestamp': datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    }
    result_file = os.path.join(EVOLUTION_DIR, "optimization_result.json")
    with open(result_file, 'w') as f:
        json.dump(optimization_result, f, indent=4, cls=NumpyEncoder)

    return best_params, best_fitness

# ====================== 主训练函数 ======================
def main_training(use_evolutionary_optimization=True):
    """
    Main training entry point.

    Optionally runs the evolutionary search for curriculum parameters first,
    then trains a DQN on the adjustable CartPole environment, periodically
    saving model checkpoints, training-curve plots and metadata under
    BASE_DIR.

    Args:
        use_evolutionary_optimization: if True, optimize the curriculum
            parameters with differential evolution before training;
            otherwise use the hand-picked defaults.
    """
    if use_evolutionary_optimization:
        print("使用演化算法优化课程学习参数...")
        optimal_params, optimal_fitness = optimize_with_evolutionary_algorithm()
        
        # Unpack the optimized curriculum parameters.
        initial_gravity, initial_pole_length, gravity_factor, pole_length_factor, performance_threshold = optimal_params
        
        print(f"优化结果:")
        print(f"  初始重力: {initial_gravity:.2f}")
        print(f"  初始杆长: {initial_pole_length:.4f}")
        print(f"  重力因子: {gravity_factor:.3f}")
        print(f"  杆长因子: {pole_length_factor:.3f}")
        print(f"  性能阈值: {performance_threshold:.1f}")
        print(f"  预期回报: {optimal_fitness:.1f}")
    else:
        # Fall back to the default curriculum parameters.
        initial_gravity = 9.8
        initial_pole_length = 0.5
        gravity_factor = 0.1
        pole_length_factor = 0.05
        performance_threshold = 195.0
        print("使用默认课程学习参数")
    
    # DQN hyperparameters.
    lr = 2e-3
    num_episodes = 500
    hidden_dim = 128
    gamma = 0.98
    epsilon = 0.01
    target_update = 10
    buffer_size = 10000
    minimal_size = 500
    batch_size = 64
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    # Record all hyperparameters for the training-info JSON dump.
    hyperparams = {
        'learning_rate': lr,
        'num_episodes': num_episodes,
        'hidden_dim': hidden_dim,
        'gamma': gamma,
        'epsilon': epsilon,
        'target_update': target_update,
        'buffer_size': buffer_size,
        'minimal_size': minimal_size,
        'batch_size': batch_size,
        'device': str(device),
        'curriculum_learning': True,
        'evolutionary_optimization': use_evolutionary_optimization,
        'curriculum_params': {
            'initial_gravity': initial_gravity,
            'initial_pole_length': initial_pole_length,
            'gravity_factor': gravity_factor,
            'pole_length_factor': pole_length_factor,
            'performance_threshold': performance_threshold
        }
    }

    # Build the adjustable environment and its difficulty scheduler.
    env = CustomCartPole()
    difficulty_scheduler = DifficultyScheduler(
        initial_gravity=initial_gravity,
        initial_pole_length=initial_pole_length,
        gravity_factor=gravity_factor,
        pole_length_factor=pole_length_factor,
        performance_threshold=performance_threshold
    )

    # Seed everything for reproducibility.
    random.seed(0)
    np.random.seed(0)
    env.reset(seed=0)
    torch.manual_seed(0)

    replay_buffer = ReplayBuffer(buffer_size)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.n
    agent = DQN(state_dim, hidden_dim, action_dim, lr, gamma, epsilon, target_update, device)

    return_list = []
    print(f"开始训练DQN算法(CartPole-v1 + 课程学习)...")
    print(f"结果将保存在: {BASE_DIR}")

    # Training loop: 10 iterations of num_episodes/10 episodes each.
    for i in range(10):
        with tqdm(total=int(num_episodes / 10), desc=f'Iteration {i+1}/10') as pbar:
            for i_episode in range(int(num_episodes / 10)):
                # Every 10 episodes, score recent performance and step difficulty.
                if i_episode > 0 and i_episode % 10 == 0 and len(return_list) >= 10:
                    avg_performance = np.mean(return_list[-10:])
                    gravity, pole_length = difficulty_scheduler.update_difficulty(avg_performance)
                    env.gravity = gravity
                    env.pole_length = pole_length
                    
                    pbar.set_postfix({
                        'gravity': f'{gravity:.2f}',
                        'pole_length': f'{pole_length:.4f}',
                        'perf': f'{avg_performance:.1f}'
                    })
                
                # Start a new episode.
                episode_return = 0
                state, info = env.reset()
                done = False
                
                while not done:
                    action = agent.take_action(state)
                    next_state, reward, done, info = env.step(action)
                    
                    replay_buffer.add(state, action, reward, next_state, done)
                    state = next_state
                    episode_return += reward
                    
                    # Only learn once the buffer holds enough transitions.
                    if replay_buffer.size() > minimal_size:
                        b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                        transition_dict = {
                            'states': b_s,
                            'actions': b_a,
                            'next_states': b_ns,
                            'rewards': b_r,
                            'dones': b_d
                        }
                        agent.update(transition_dict)
                
                return_list.append(episode_return)
                
                if (i_episode + 1) % 10 == 0:
                    pbar.set_postfix({
                        'episode': i_episode + 1,
                        'return': '%.3f' % np.mean(return_list[-10:])
                    })
                
                pbar.update(1)
            
            # Checkpoint the model after each iteration.
            agent.save_model(os.path.join(MODEL_DIR, f"dqn_model_iteration_{i}.pth"))
            
            # Save intermediate training curves (window=9 moving average).
            if len(return_list) >= 9:
                mv_return = moving_average(return_list, 9)
                plot_path = save_training_plots(return_list, mv_return, (i+1) * int(num_episodes / 10), f"iter_{i}_")
    
    # After training: compute the final smoothed curve (or fall back to the
    # raw returns when there are too few episodes for the window).
    if len(return_list) >= 9:
        mv_return = moving_average(return_list, 9)
    else:
        mv_return = return_list.copy()

    # Save the final model checkpoint.
    final_model_path = os.path.join(MODEL_DIR, "dqn_model_final.pth")
    agent.save_model(final_model_path)

    # Save the final training curves.
    final_plot_path = save_training_plots(return_list, mv_return, num_episodes, "final_")

    # Save hyperparameters and summary statistics.
    info_path = save_training_info(hyperparams, return_list, mv_return)

    # Save the curriculum (difficulty) progression data and its plot.
    diff_progression_path = difficulty_scheduler.save_difficulty_progression()
    diff_plot_path = difficulty_scheduler.plot_difficulty_progression()

    print(f"\n训练完成！")
    print(f"最终模型已保存: {final_model_path}")
    print(f"训练曲线图已保存: {final_plot_path}")
    print(f"训练信息已保存: {info_path}")
    print(f"课程学习过程数据已保存: {diff_progression_path}")
    print(f"课程学习可视化图已保存: {diff_plot_path}")
    print(f"最后10个回合的平均回报: {np.mean(return_list[-10:]):.3f}")

    # Show the final two-panel figure interactively as well.
    episodes_list = list(range(len(return_list)))
    plt.figure(figsize=(12, 5))

    plt.subplot(1, 2, 1)
    plt.plot(episodes_list, return_list)
    plt.xlabel('Episodes')
    plt.ylabel('Returns')
    plt.title('DQN on CartPole-v1 (Curriculum Learning)')

    plt.subplot(1, 2, 2)
    mv_episodes = list(range(len(mv_return)))
    plt.plot(mv_episodes, mv_return)
    plt.xlabel('Episodes')
    plt.ylabel('Moving Average Returns')
    plt.title('Moving Average (window=9)')

    plt.tight_layout()
    plt.savefig(os.path.join(PLOT_DIR, "final_training_results.png"), dpi=300, bbox_inches='tight')
    plt.show()

# ====================== 主程序 ======================
if __name__ == "__main__":
    # Ask interactively whether to run the evolutionary hyperparameter
    # search before training.
    answer = input("是否使用演化算法优化课程学习参数? (y/n): ")
    use_evolutionary = answer.lower().strip() == 'y'

    # Launch training.
    main_training(use_evolutionary_optimization=use_evolutionary)