import os
import yaml
import numpy as np
import torch
import matplotlib.pyplot as plt
import matplotlib
from tqdm import tqdm
import argparse
import random
import time
from pathlib import Path

# Import the Chinese-font setup helper together with the environment classes
from env.grid_env import setup_chinese_font, GridMap, RobotEnv
from agent.dqn_agent import DQNAgent

# Configure matplotlib to render Chinese glyphs (project helper; runs at import time)
setup_chinese_font()

def set_seed(seed):
    """Seed the Python, NumPy and PyTorch RNGs for reproducibility.

    Args:
        seed: Integer seed; when ``None`` the function is a no-op.
    """
    if seed is None:
        return
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Seed every CUDA device as well, when GPUs are present.
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

def create_small_map(width=10, height=10, obstacle_ratio=0.15, seed=42):
    """Return a small ``GridMap`` for training/evaluation.

    Args:
        width: Map width in cells.
        height: Map height in cells.
        obstacle_ratio: Fraction of cells that become obstacles.
        seed: Seed used by the map generator, for reproducible layouts.
    """
    return GridMap(width=width, height=height, obstacle_ratio=obstacle_ratio, seed=seed)

def train_advanced_dqn(width=10, height=10, obstacle_ratio=0.15, episodes=50, 
                       use_double_dqn=True, use_prioritized=True, use_noisy=True, n_step=3, seed=42):
    """
    Train an advanced DQN agent in a small grid environment, redrawing the
    grid in an interactive matplotlib window after every environment step.

    Args:
        width: map width in cells
        height: map height in cells
        obstacle_ratio: fraction of cells occupied by obstacles
        episodes: number of training episodes
        use_double_dqn: whether to use Double DQN
        use_prioritized: whether to use prioritized experience replay
        use_noisy: whether to use Noisy Networks
        n_step: number of steps for n-step TD learning
        seed: random seed (also controls map generation)

    Returns:
        (agent, grid_map): the trained DQNAgent and the GridMap it trained on.

    Side effects:
        Saves the final model under models/, a training-curves PNG under
        results/, and prints per-episode progress to stdout.
    """
    # Seed all RNGs for reproducibility
    set_seed(seed)
    
    # Ensure output directories exist
    os.makedirs("models", exist_ok=True)
    os.makedirs("results", exist_ok=True)
    
    # Build the map and wrap it in the robot environment
    grid_map = create_small_map(width, height, obstacle_ratio, seed)
    env = RobotEnv(grid_map)
    
    # Agent dimensions
    state_dim = 15  # updated state dimension: 10 (basic) + 1 (oscillation feature) + 4 (one-hot of previous action)
    action_dim = 4  # [up, right, down, left]
    
    # DQN hyperparameters
    dqn_config = {
        'learning_rate': 0.001,
        'gamma': 0.99,
        'epsilon_start': 1.0,
        'epsilon_end': 0.01,
        'epsilon_decay': 0.9,  # faster decay
        'batch_size': 32,
        'memory_size': 10000,
        'target_update': 5
    }
    
    # Create the agent
    agent = DQNAgent(
        state_dim=state_dim,
        action_dim=action_dim,
        config=dqn_config,
        use_double_dqn=use_double_dqn,
        use_prioritized=use_prioritized,
        use_noisy=use_noisy,
        n_step=n_step
    )
    
    # Per-episode training statistics
    rewards_history = []
    steps_history = []
    success_history = []
    
    # Set up live visualization
    plt.ion()  # interactive mode: draw()/pause() update without blocking
    fig, ax = plt.subplots(figsize=(8, 8))
    plt.show()
    
    # Start training
    print(f"开始训练高级DQN智能体，共{episodes}回合...")
    print(f"使用Double DQN: {use_double_dqn}, 优先经验回放: {use_prioritized}, Noisy Networks: {use_noisy}, {n_step}步TD学习")
    
    for episode in range(1, episodes + 1):
        # Retry reset until start and goal are connected by a reachable path
        # (env.reset(ensure_path=True) raises ValueError when it cannot)
        state = None
        while state is None:
            try:
                state = env.reset(ensure_path=True)
            except ValueError as e:
                print(f"重置环境失败: {e}, 重试...")
                continue
        
        episode_reward = 0
        num_steps = 0
        
        # Cells visited this episode, for path visualization
        path = [env.position]
        
        # Draw the initial state of the episode
        ax.clear()
        robot_paths = {0: path}
        goals = {0: env.goal}
        env.grid_map.render(fig=fig, ax=ax, robot_paths=robot_paths, goals=goals)
        ax.set_title(f"训练回合 {episode}, 步数: {num_steps}")
        plt.draw()
        plt.pause(0.5)
        
        while True:
            # Choose an action
            action = agent.select_action(state)
            
            # Apply it to the environment
            next_state, reward, done, info = env.step(action)
            
            # Record the new position
            path.append(env.position)
            
            # Redraw the grid with the updated path
            ax.clear()
            robot_paths = {0: path}
            goals = {0: env.goal}
            env.grid_map.render(fig=fig, ax=ax, robot_paths=robot_paths, goals=goals)
            ax.set_title(f"训练回合 {episode}, 步数: {num_steps+1}")
            plt.draw()
            plt.pause(0.1)
            
            # Store the transition in the replay buffer
            agent.store_transition(state, action, reward, next_state, done)
            
            # One learning step
            agent.learn()
            
            # Advance bookkeeping
            state = next_state
            episode_reward += reward
            num_steps += 1
            
            # Stop when the episode ends or the step budget is exhausted
            if done or num_steps >= width * height * 2:
                break
        
        # Periodically sync the target network
        if episode % agent.target_update == 0:
            agent.update_target_network()
        
        # Decay the exploration rate
        agent.update_epsilon()
        
        # Record statistics for this episode
        rewards_history.append(episode_reward)
        steps_history.append(num_steps)
        success = env.grid_map.is_done(env.position, env.goal)
        success_history.append(1 if success else 0)
        
        # Report progress; success rate is over the last up-to-10 episodes
        success_rate = np.mean(success_history[-min(10, len(success_history)):]) * 100
        print(f"回合: {episode}/{episodes}, "
              f"奖励: {episode_reward:.2f}, "
              f"步数: {num_steps}, "
              f"成功: {'是' if success else '否'}, "
              f"成功率: {success_rate:.1f}%")
        
        # Brief pause between episodes
        plt.pause(0.5)
    
    # Leave interactive mode
    plt.ioff()
    
    # Save the final model; the filename encodes which variants were enabled
    model_name = f"dqn_{'double' if use_double_dqn else 'simple'}_{'prio' if use_prioritized else 'std'}_{'noisy' if use_noisy else 'eps'}_n{n_step}.pth"
    final_model_path = os.path.join("models", model_name)
    agent.save_model(final_model_path)
    print(f"最终模型已保存到 {final_model_path}")
    
    # Save the final training-curves figure (reward / steps / success rate)
    plt.figure(figsize=(15, 5))
    
    plt.subplot(1, 3, 1)
    plt.plot(rewards_history)
    plt.title('回合奖励')
    plt.xlabel('回合')
    plt.ylabel('奖励')
    
    plt.subplot(1, 3, 2)
    plt.plot(steps_history)
    plt.title('回合步数')
    plt.xlabel('回合')
    plt.ylabel('步数')
    
    plt.subplot(1, 3, 3)
    window_size = min(5, len(success_history))
    if window_size > 0:
        # Moving-average success rate, in percent
        success_moving_avg = [
            np.mean(success_history[max(0, i - window_size):i+1]) * 100 
            for i in range(len(success_history))
        ]
        plt.plot(success_moving_avg)
        plt.title('成功率 (移动平均)')
        plt.xlabel('回合')
        plt.ylabel('成功率 (%)')
        plt.ylim(0, 105)
    
    plt.tight_layout()
    plt.savefig(os.path.join("results", f"advanced_dqn_training_{model_name.replace('.pth', '.png')}"))
    plt.close()  # save only, do not display
    
    return agent, grid_map

def compare_agents(width=10, height=10, obstacle_ratio=0.15, num_episodes=10, seed=42):
    """
    Train several DQN variants (30 episodes each) and evaluate them on the
    same test map, then plot a success-rate / average-steps comparison.

    Args:
        width: map width in cells
        height: map height in cells
        obstacle_ratio: fraction of cells occupied by obstacles
        num_episodes: number of evaluation episodes per variant
        seed: random seed shared by training, evaluation and map generation

    Returns:
        Dict mapping variant name -> {"success_rate": percent,
        "avg_steps": mean steps over successful episodes, or +inf when the
        variant never succeeded}.

    Side effects:
        Saves results/dqn_variants_comparison.png and shows the figure.
    """
    # Seed all RNGs for reproducibility
    set_seed(seed)
    
    # Test map shared by every variant's evaluation
    grid_map = create_small_map(width, height, obstacle_ratio, seed)
    
    # The DQN variants under comparison (runtime names kept in Chinese)
    variants = [
        {"name": "普通DQN", "double": False, "prioritized": False, "noisy": False, "n_step": 1},
        {"name": "Double DQN", "double": True, "prioritized": False, "noisy": False, "n_step": 1},
        {"name": "Double DQN + 优先经验回放", "double": True, "prioritized": True, "noisy": False, "n_step": 1},
        {"name": "Double DQN + Noisy Networks", "double": True, "prioritized": False, "noisy": True, "n_step": 1},
        {"name": "Double DQN + 3步TD学习", "double": True, "prioritized": False, "noisy": False, "n_step": 3},
        {"name": "完整改进版", "double": True, "prioritized": True, "noisy": True, "n_step": 3}
    ]
    
    results = {}
    
    # Train and evaluate each variant
    for variant in variants:
        print(f"\n开始训练 {variant['name']}...")
        
        # Train the agent (the training map is regenerated from the same seed)
        agent, _ = train_advanced_dqn(
            width=width, 
            height=height, 
            obstacle_ratio=obstacle_ratio,
            episodes=30,  # fewer episodes, just for comparison
            use_double_dqn=variant["double"],
            use_prioritized=variant["prioritized"],
            use_noisy=variant["noisy"],
            n_step=variant["n_step"],
            seed=seed
        )
        
        # Evaluate the agent on the shared test map
        env = RobotEnv(grid_map)
        success_count = 0
        total_steps = 0
        
        for episode in range(num_episodes):
            # Retry reset until start and goal are connected by a reachable path
            state = None
            while state is None:
                try:
                    state = env.reset(ensure_path=True)
                except ValueError as e:
                    print(f"重置环境失败: {e}, 重试...")
                    continue
            
            done = False
            steps = 0
            
            # Greedy rollout with the same step budget used during training
            while not done and steps < width * height * 2:
                action = agent.select_action(state, eval_mode=True)
                next_state, reward, done, info = env.step(action)
                state = next_state
                steps += 1
            
            # Only successful episodes contribute to the average step count
            if env.grid_map.is_done(env.position, env.goal):
                success_count += 1
                total_steps += steps
        
        # Aggregate the evaluation results
        success_rate = success_count / num_episodes * 100
        avg_steps = total_steps / success_count if success_count > 0 else float('inf')
        
        results[variant["name"]] = {
            "success_rate": success_rate,
            "avg_steps": avg_steps
        }
        
        print(f"{variant['name']} 评估结果: 成功率 {success_rate:.1f}%, 平均步数 {avg_steps:.1f}")
    
    # Plot the comparison charts
    plt.figure(figsize=(12, 6))
    
    # Success-rate comparison
    plt.subplot(1, 2, 1)
    names = list(results.keys())
    success_rates = [results[name]["success_rate"] for name in names]
    plt.bar(names, success_rates)
    plt.title('成功率对比')
    plt.ylabel('成功率 (%)')
    plt.xticks(rotation=45, ha='right')
    
    # Average-steps comparison
    plt.subplot(1, 2, 2)
    avg_steps = [results[name]["avg_steps"] for name in names]
    plt.bar(names, avg_steps)
    plt.title('平均步数对比')
    plt.ylabel('平均步数')
    plt.xticks(rotation=45, ha='right')
    
    plt.tight_layout()
    plt.savefig(os.path.join("results", "dqn_variants_comparison.png"))
    plt.show()
    
    return results

def main():
    """Command-line entry point: train a single agent or compare DQN variants."""
    parser = argparse.ArgumentParser(description="训练高级DQN智能体")
    add = parser.add_argument
    add("--mode", type=str, default="train", choices=["train", "compare"], help="运行模式: train或compare")
    add("--width", type=int, default=10, help="地图宽度")
    add("--height", type=int, default=10, help="地图高度")
    add("--obstacle_ratio", type=float, default=0.15, help="障碍物比例")
    add("--episodes", type=int, default=50, help="训练回合数")
    add("--seed", type=int, default=42, help="随机种子")
    add("--double", action="store_true", help="是否使用Double DQN")
    add("--prioritized", action="store_true", help="是否使用优先经验回放")
    add("--noisy", action="store_true", help="是否使用Noisy Networks")
    add("--n_step", type=int, default=1, help="n步TD学习的步数")

    args = parser.parse_args()

    # NOTE(review): --episodes, --double, --prioritized, --noisy and --n_step
    # only affect "train" mode; compare mode uses its own fixed settings.
    if args.mode != "train":
        compare_agents(
            width=args.width,
            height=args.height,
            obstacle_ratio=args.obstacle_ratio,
            seed=args.seed,
        )
        return

    train_advanced_dqn(
        width=args.width,
        height=args.height,
        obstacle_ratio=args.obstacle_ratio,
        episodes=args.episodes,
        use_double_dqn=args.double,
        use_prioritized=args.prioritized,
        use_noisy=args.noisy,
        n_step=args.n_step,
        seed=args.seed,
    )

# Run only when executed as a script, not when imported as a module
if __name__ == "__main__":
    main() 