import os
import yaml
import numpy as np
import torch
import matplotlib.pyplot as plt
import matplotlib
from tqdm import tqdm
import argparse
import random
import time
from pathlib import Path

# Import the Chinese-font setup helper for matplotlib.
from env.grid_env import setup_chinese_font

# Configure a Chinese-capable font before any figure is created.
setup_chinese_font()

from env.grid_env import GridMap, RobotEnv
from agent.dqn_agent import DQNAgent


def load_config(config_path):
    """Load a YAML configuration file and return its contents as a dict."""
    with open(config_path, 'r', encoding='utf-8') as config_file:
        return yaml.safe_load(config_file)


def set_seed(seed):
    """Seed Python, NumPy and PyTorch RNGs for reproducible runs.

    A ``None`` seed is a no-op and leaves all generators unseeded.
    """
    if seed is None:
        return

    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)


def train_dqn(config, model_dir, log_dir, seed=42, visualize_interval=5, step_visualize=True):
    """
    Train a DQN agent on the grid-world environment.

    Args:
        config: parsed configuration dict (expects 'map' and 'dqn' sections)
        model_dir: directory where model checkpoints are saved
        log_dir: directory where training curves are saved
        seed: random seed (None leaves RNGs unseeded)
        visualize_interval: render every N-th episode; 0/None disables all
            matplotlib visualization. Previously this was hard-coded to 5 and
            main() crashed with a TypeError when forwarding --visualize; a
            value of 0 would also have raised ZeroDivisionError in the modulo.
        step_visualize: when True, the episode following each visualization
            episode is redrawn after every environment step (animated view);
            this keyword matches main()'s --step-visualize flag.

    Returns:
        The trained DQNAgent.
    """
    set_seed(seed)

    os.makedirs(model_dir, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)

    # Build the environment from the map section of the config.
    map_width = config['map']['width']
    map_height = config['map']['height']
    obstacle_ratio = config['map']['obstacle_ratio']

    grid_map = GridMap(width=map_width, height=map_height, obstacle_ratio=obstacle_ratio, seed=seed)
    env = RobotEnv(grid_map)

    # State layout: [x, y, goal_x, goal_y, up_obs, right_obs, down_obs, left_obs,
    # in_corner, dist_to_boundary, oscillation_feature, last_action(one-hot of 4)]
    state_dim = 15
    action_dim = 4  # up, right, down, left

    agent = DQNAgent(
        state_dim=state_dim,
        action_dim=action_dim,
        config=config['dqn'],
        use_double_dqn=True,
        use_prioritized=True
    )

    # Training schedule.
    num_episodes = config['dqn']['episodes']
    print_interval = 10
    save_interval = 100

    # Per-episode training statistics.
    rewards_history = []
    losses_history = []
    steps_history = []
    success_history = []

    # Only set up interactive plotting when visualization is enabled.
    fig = ax = None
    if visualize_interval:
        plt.ion()
        fig, ax = plt.subplots(figsize=(10, 10))
        plt.show()

    print(f"开始训练 DQN 智能体，共 {num_episodes} 回合...")

    for episode in tqdm(range(1, num_episodes + 1)):
        state = env.reset()
        episode_reward = 0
        episode_loss = 0
        num_steps = 0
        num_updates = 0

        path = [env.position]  # visited cells, for rendering

        # Animate this episode step-by-step when the PREVIOUS episode hit the
        # visualization interval (original behavior), if step_visualize is on.
        show_dynamic_viz = bool(
            visualize_interval and step_visualize
            and episode > 1 and (episode - 1) % visualize_interval == 0
        )

        if show_dynamic_viz:
            _draw_episode(fig, ax, env, path,
                          f"训练回合 {episode}, 步数: {num_steps}", 0.1)

        while True:
            action = agent.select_action(state)
            next_state, reward, done, info = env.step(action)
            path.append(env.position)

            if show_dynamic_viz:
                _draw_episode(fig, ax, env, path,
                              f"训练回合 {episode}, 步数: {num_steps+1}, 奖励: {reward:.2f}", 0.1)

            agent.store_transition(state, action, reward, next_state, done)

            # One gradient step; learn() returns None until the replay buffer
            # is warm enough to sample from.
            loss = agent.learn()
            if loss is not None:
                episode_loss += loss
                num_updates += 1

            state = next_state
            episode_reward += reward
            num_steps += 1

            if done:
                break

        # Periodically sync the target network with the online network.
        if episode % agent.target_update == 0:
            agent.update_target_network()

        agent.update_epsilon()  # decay exploration rate

        rewards_history.append(episode_reward)
        steps_history.append(num_steps)
        success_history.append(1 if env.grid_map.is_done(env.position, env.goal) else 0)
        losses_history.append(episode_loss / num_updates if num_updates > 0 else 0)

        if episode % print_interval == 0:
            recent_losses = [l for l in losses_history[-print_interval:] if l > 0]
            avg_reward = np.mean(rewards_history[-print_interval:])
            # Guard the all-zero window: np.mean([]) is NaN and warns.
            avg_loss = np.mean(recent_losses) if recent_losses else 0.0
            avg_steps = np.mean(steps_history[-print_interval:])
            success_rate = np.mean(success_history[-print_interval:]) * 100

            print(f"回合: {episode}/{num_episodes}, "
                  f"奖励: {avg_reward:.2f}, "
                  f"损失: {avg_loss:.4f}, "
                  f"步数: {avg_steps:.1f}, "
                  f"成功率: {success_rate:.1f}%, "
                  f"探索率: {agent.epsilon:.3f}")

        # Static end-of-episode snapshot on interval episodes that were not animated.
        if visualize_interval and episode % visualize_interval == 0 and not show_dynamic_viz:
            print(f"\n回合 {episode} 可视化:")
            success_text = '是' if env.grid_map.is_done(env.position, env.goal) else '否'
            _draw_episode(fig, ax, env, path,
                          f"训练回合 {episode}, 步数: {num_steps}, 成功: {success_text}", 1.0)

        # Checkpoint the model and refresh the training-curve plot.
        if episode % save_interval == 0 or episode == num_episodes:
            model_path = os.path.join(model_dir, f"dqn_model_ep{episode}.pth")
            agent.save_model(model_path)
            print(f"模型已保存到 {model_path}")
            plot_training_curves(rewards_history, losses_history, steps_history, success_history, log_dir)

    if visualize_interval:
        plt.ioff()

    final_model_path = os.path.join(model_dir, "dqn_model_final.pth")
    agent.save_model(final_model_path)
    print(f"最终模型已保存到 {final_model_path}")

    plot_training_curves(rewards_history, losses_history, steps_history, success_history, log_dir)

    return agent


def _draw_episode(fig, ax, env, path, title, pause):
    """Redraw the grid, the robot's path so far and the goal on the shared axes."""
    ax.clear()
    env.grid_map.render(fig=fig, ax=ax, robot_paths={0: path}, goals={0: env.goal})
    plt.title(title)
    plt.draw()
    plt.pause(pause)


def plot_training_curves(rewards, losses, steps, success_rates, log_dir):
    """Render a 2x2 grid of training curves and save it to log_dir/training_curves.png."""
    plt.figure(figsize=(20, 16))  # large canvas so the four panels stay readable

    def _panel(index, series, title, ylabel):
        # Shared styling for the three simple line panels.
        plt.subplot(2, 2, index)
        plt.plot(series)
        plt.title(title, fontsize=16)
        plt.xlabel('Episode', fontsize=14)
        plt.ylabel(ylabel, fontsize=14)
        plt.tick_params(axis='both', which='major', labelsize=12)

    _panel(1, rewards, 'Episode Rewards', 'Reward')
    # Zero entries mean "no network update this episode"; keep them off the loss panel.
    _panel(2, [value for value in losses if value > 0], 'Training Loss', 'Loss')
    _panel(3, steps, 'Steps per Episode', 'Steps')

    # Success-rate panel: moving average over a window of up to 100 episodes.
    plt.subplot(2, 2, 4)
    window_size = min(100, len(success_rates))
    if window_size > 0:
        moving_avg = []
        for i in range(len(success_rates)):
            window = success_rates[max(0, i - window_size):i + 1]
            moving_avg.append(np.mean(window) * 100)
        plt.plot(moving_avg)
        plt.title(f'Success Rate (Moving Avg, Window={window_size})', fontsize=16)
        plt.xlabel('Episode', fontsize=14)
        plt.ylabel('Success Rate (%)', fontsize=14)
        plt.ylim(0, 105)
        plt.tick_params(axis='both', which='major', labelsize=12)

    plt.tight_layout(pad=3.0)  # extra spacing between subplots
    plt.savefig(os.path.join(log_dir, 'training_curves.png'), dpi=300)  # high DPI for quality
    plt.close()


def evaluate_agent(agent, env, num_episodes=10, render=True):
    """
    Evaluate a trained agent over several greedy rollouts.

    Args:
        agent: DQN agent exposing select_action(state, eval_mode=True)
        env: environment to roll out in
        num_episodes: number of evaluation episodes
        render: when True, render each episode's path after it finishes

    Returns:
        dict with 'success_rate' (percent), 'avg_reward' and 'avg_steps'.
        (Previously the metrics were only printed; returning them lets
        callers consume the results programmatically — existing callers that
        ignored the None return are unaffected.)
    """
    success_count = 0
    total_rewards = 0
    total_steps = 0

    for episode in range(num_episodes):
        state = env.reset()
        episode_reward = 0
        episode_steps = 0
        path = [env.position]  # visited cells, for rendering

        while True:
            # Greedy action selection — no exploration during evaluation.
            action = agent.select_action(state, eval_mode=True)
            next_state, reward, done, info = env.step(action)
            path.append(env.position)

            state = next_state
            episode_reward += reward
            episode_steps += 1

            if done:
                break

        success = env.grid_map.is_done(env.position, env.goal)
        if success:
            success_count += 1

        total_rewards += episode_reward
        total_steps += episode_steps

        print(f"回合 {episode+1}/{num_episodes}, "
              f"奖励: {episode_reward:.2f}, "
              f"步数: {episode_steps}, "
              f"成功: {'是' if success else '否'}")

        if render:
            env.grid_map.render(robot_paths={0: path}, goals={0: env.goal})

    # Aggregate metrics across all evaluation episodes.
    success_rate = success_count / num_episodes * 100
    avg_reward = total_rewards / num_episodes
    avg_steps = total_steps / num_episodes

    print(f"\n评估结果:")
    print(f"成功率: {success_rate:.1f}%")
    print(f"平均奖励: {avg_reward:.2f}")
    print(f"平均步数: {avg_steps:.1f}")

    return {
        'success_rate': success_rate,
        'avg_reward': avg_reward,
        'avg_steps': avg_steps,
    }


def main():
    """CLI entry point: train a DQN agent, or evaluate a saved model with --eval."""
    parser = argparse.ArgumentParser(description="训练 DQN 智能体")
    parser.add_argument('--config', type=str, default='config/map_config.yaml', help='配置文件路径')
    parser.add_argument('--seed', type=int, default=42, help='随机种子')
    parser.add_argument('--eval', action='store_true', help='是否评估模型')
    parser.add_argument('--model', type=str, default=None, help='评估时加载的模型路径')
    parser.add_argument('--visualize', type=int, default=0, help='训练过程可视化间隔（0表示不可视化）')
    parser.add_argument('--step-visualize', action='store_true', help='是否进行逐步可视化')
    args = parser.parse_args()

    config = load_config(args.config)

    # Output locations for checkpoints and logs.
    model_dir = 'models'
    log_dir = 'logs'

    if not args.eval:
        # Training mode: forward the visualization flags to train_dqn.
        train_dqn(config, model_dir, log_dir, seed=args.seed,
                  visualize_interval=args.visualize, step_visualize=args.step_visualize)
        return

    # Evaluation mode: resolve which checkpoint to load.
    if args.model is None:
        model_path = os.path.join(model_dir, 'dqn_model_final.pth')
    else:
        model_path = args.model

    if not os.path.exists(model_path):
        print(f"模型文件 {model_path} 不存在!")
        return

    # Rebuild the environment with the same map configuration and seed.
    grid_map = GridMap(
        width=config['map']['width'],
        height=config['map']['height'],
        obstacle_ratio=config['map']['obstacle_ratio'],
        seed=args.seed
    )
    env = RobotEnv(grid_map)

    # Agent with the same network dimensions used during training.
    agent = DQNAgent(
        state_dim=15,
        action_dim=4,
        config=config['dqn'],
        use_double_dqn=True,
        use_prioritized=True
    )
    agent.load_model(model_path)
    print(f"已加载模型: {model_path}")

    evaluate_agent(agent, env, num_episodes=5, render=True)


if __name__ == "__main__":
    main()