import os
import yaml
import numpy as np
import torch
import matplotlib.pyplot as plt
import matplotlib
from tqdm import tqdm
import argparse
import random
import time
from pathlib import Path
import sys
from collections import deque

# Add the project root directory to the Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Import the Chinese font setup helper
from env.grid_env import setup_chinese_font

# Configure the Chinese font for matplotlib
setup_chinese_font()

from env.grid_env import GridMap, RobotEnv
from agent.dqn_agent import DQNAgent

def set_seed(seed):
    """Seed Python, NumPy and PyTorch (incl. CUDA) RNGs for reproducibility.

    A ``None`` seed is a no-op and leaves every generator untouched.
    """
    if seed is None:
        return
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

def create_small_map(width=10, height=10, obstacle_ratio=0.1, seed=42):
    """Build a small grid map; a reduced obstacle ratio suits small grids."""
    return GridMap(width=width, height=height, obstacle_ratio=obstacle_ratio, seed=seed)

def create_warehouse_map(width=20, height=20, seed=42):
    """
    Create a warehouse grid map with shelves (obstacles) in a regular layout.

    Shelf rows are placed every 3 rows; each shelf spans 2 cells and shelves
    are placed every 4 columns, leaving 1-cell aisles between them. A
    2-cell-wide main vertical aisle is carved down the middle of the map so
    the two halves stay connected.

    Args:
        width: map width
        height: map height
        seed: random seed (also reseeds the global random/NumPy generators)

    Returns:
        tuple: (GridMap, list of (x, y) shelf positions)
    """
    # Reseed the global RNGs so the layout is reproducible
    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)

    # Start from an empty map; shelves are added explicitly below
    grid_map = GridMap(width=width, height=height, obstacle_ratio=0, seed=seed)

    # Horizontal shelf rows: one row every 3 rows, each shelf 2 cells wide,
    # placed every 4 columns
    obstacles = []
    for y in range(3, height - 3, 3):
        for x in range(2, width - 2, 4):
            obstacles.append((x, y))
            obstacles.append((x + 1, y))

    # Carve the 2-cell-wide main vertical aisle through the middle.
    # A single order-preserving filter replaces the original repeated
    # list.remove() calls (which were O(n) each).
    main_aisle_x = width // 2
    aisle_cols = {main_aisle_x - 1, main_aisle_x}
    aisle_rows = range(2, height - 2)  # `in` on a range is O(1)
    obstacles = [
        (x, y) for (x, y) in obstacles
        if not (x in aisle_cols and y in aisle_rows)
    ]

    # Register the shelves as obstacles on the map
    grid_map.set_obstacles(obstacles)

    return grid_map, obstacles

def is_path_exists(grid_map, start, goal):
    """
    Check whether a 4-connected path exists from start to goal via BFS.

    Args:
        grid_map: grid map; must expose ``is_valid_position((x, y))``
            (assumed to be True only for in-bounds, walkable cells -- TODO
            confirm against GridMap, since some callers also filter
            obstacles separately)
        start: start position (x, y)
        goal: goal position (x, y)

    Returns:
        bool: True if a path exists, False otherwise
    """
    # BFS frontier; deque gives O(1) popleft (list.pop(0) was O(n) per pop)
    queue = deque([start])
    visited = {start}

    while queue:
        x, y = queue.popleft()

        # Reached the goal
        if (x, y) == goal:
            return True

        # Expand the four neighbours: up, right, down, left
        for dx, dy in [(0, 1), (1, 0), (0, -1), (-1, 0)]:
            nx, ny = x + dx, y + dy

            # Enqueue unvisited, valid cells
            if grid_map.is_valid_position((nx, ny)) and (nx, ny) not in visited:
                queue.append((nx, ny))
                visited.add((nx, ny))

    # Frontier exhausted without reaching the goal
    return False

def train_simple_dqn(width=20, height=20, episodes=200, seed=42, use_warehouse=True, max_steps=50):
    """
    Train a DQN agent on a small grid environment with live visualization.

    Renders the environment every 5th episode, refreshes a 2x3 metrics
    figure every 5 episodes, and saves a model checkpoint plus a metrics
    snapshot every 100 episodes.

    Args:
        width: map width
        height: map height
        episodes: number of training episodes
        seed: random seed
        use_warehouse: whether to use the warehouse-style map (regular shelves)
        max_steps: maximum number of steps per episode

    Returns:
        tuple: (agent, grid_map, rewards_history, steps_history, success_history, losses_history, epsilon_history, avg_q_values_history)
    """
    # Seed Python / NumPy / PyTorch RNGs for reproducibility
    set_seed(seed)
    
    # Create output directories for checkpoints and result figures
    os.makedirs("models", exist_ok=True)
    os.makedirs("results", exist_ok=True)
    
    # Build the map (warehouse layout or random obstacles)
    obstacles = []
    if use_warehouse:
        grid_map, obstacles = create_warehouse_map(width, height, seed)
        print(f"创建仓储地图，共有 {len(obstacles)} 个货架位置")
    else:
        grid_map = create_small_map(width, height, obstacle_ratio=0.15, seed=seed)
    
    # Create the environment, passing through the per-episode step limit
    env = RobotEnv(grid_map, max_steps=max_steps)
    
    # Create the DQN agent
    state_dim = 15  # state dimension: 10 (base) + 1 (oscillation feature) + 4 (one-hot of previous action)
    action_dim = 4  # [up, right, down, left]
    
    # DQN hyper-parameters
    dqn_config = {
        'learning_rate': 0.001,
        'gamma': 0.99,
        'epsilon_start': 1.0,
        'epsilon_end': 0.01,
        'epsilon_decay': 0.995,  # slower decay, suited to longer training runs
        'batch_size': 32,
        'memory_size': 10000,    # larger replay memory
        'target_update': 10      # target-network update interval (episodes)
    }
    
    agent = DQNAgent(
        state_dim=state_dim,
        action_dim=action_dim,
        config=dqn_config,
        use_double_dqn=True,
        use_prioritized=True
    )
    
    # Per-episode training statistics
    rewards_history = []
    steps_history = []
    success_history = []
    losses_history = []
    epsilon_history = []
    avg_q_values_history = []
    
    # Sliding windows for moving averages
    reward_window = deque(maxlen=100)
    success_window = deque(maxlen=100)
    steps_window = deque(maxlen=100)
    
    # Set up live visualization - both training metrics and the environment
    plt.ion()  # interactive mode on
    
    # Two figures: one for the environment, one for the training metrics
    fig_env, ax_env = plt.subplots(figsize=(10, 10))
    fig_metrics, axs_metrics = plt.subplots(2, 3, figsize=(18, 10))
    plt.show()
    
    # Start training
    print(f"开始训练简化DQN智能体，共{episodes}回合...")
    
    for episode in range(1, episodes + 1):
        # In the warehouse setting, pick a random shelf location as the goal
        goal_pos = None
        if use_warehouse and obstacles:
            # Use a traversable cell NEXT TO a shelf as the goal
            # (an aisle cell, not the shelf itself)
            obstacle_pos = random.choice(obstacles)
            # Collect walkable cells adjacent to the chosen shelf
            possible_goals = []
            for dx, dy in [(0, 1), (1, 0), (0, -1), (-1, 0)]:  # up, right, down, left
                nx, ny = obstacle_pos[0] + dx, obstacle_pos[1] + dy
                if grid_map.is_valid_position((nx, ny)) and (nx, ny) not in obstacles:
                    possible_goals.append((nx, ny))
            
            if possible_goals:
                goal_pos = random.choice(possible_goals)
        
        # Ensure a reachable path exists between start and goal
        state = None
        while state is None:
            try:
                state = env.reset(goal_pos=goal_pos, ensure_path=True)
            except ValueError as e:
                print(f"重置环境失败: {e}, 重试...")
                continue
        
        episode_reward = 0
        num_steps = 0
        episode_losses = []
        episode_q_values = []
        
        # Track the robot's path for rendering
        path = [env.position]
        
        # Only animate every 5th episode to keep training fast
        show_visualization = episode % 5 == 0
        
        # Initial render of the environment
        if show_visualization:
            ax_env.clear()
            robot_paths = {0: path}
            goals = {0: env.goal}
            env.grid_map.render(fig=fig_env, ax=ax_env, robot_paths=robot_paths, goals=goals)
            ax_env.set_title(f"训练回合 {episode}, 步数: {num_steps}")
            plt.figure(fig_env.number)
            plt.draw()
            plt.pause(0.01)
        
        while True:
            # Choose an action (exploration handled inside the agent)
            action = agent.select_action(state)
            
            # Apply the action to the environment
            next_state, reward, done, info = env.step(action)
            
            # Record the new position
            path.append(env.position)
            
            # Live environment rendering - only on selected episodes
            if show_visualization:
                ax_env.clear()
                robot_paths = {0: path}
                goals = {0: env.goal}
                env.grid_map.render(fig=fig_env, ax=ax_env, robot_paths=robot_paths, goals=goals)
                ax_env.set_title(f"训练回合 {episode}, 步数: {num_steps+1}")
                plt.figure(fig_env.number)
                plt.draw()
                plt.pause(0.01)
            
            # Store the transition in replay memory
            agent.store_transition(state, action, reward, next_state, done)
            
            # One learning step; also collect the loss and mean Q-value
            loss, q_value = agent.learn(return_q_value=True)
            if loss is not None:
                episode_losses.append(loss)
            if q_value is not None:
                episode_q_values.append(q_value)
            
            # Advance to the next state
            state = next_state
            episode_reward += reward
            num_steps += 1
            
            # Stop when the episode ends or the step limit is reached
            if done or num_steps >= max_steps:  # use the configured step limit
                break
        
        # Periodically sync the target network
        if episode % agent.target_update == 0:
            agent.update_target_network()
        
        # Decay the exploration rate
        agent.update_epsilon()
        
        # Record per-episode statistics
        rewards_history.append(episode_reward)
        steps_history.append(num_steps)
        success = env.grid_map.is_done(env.position, env.goal)
        success_history.append(1 if success else 0)
        epsilon_history.append(agent.epsilon)
        
        # Mean loss / Q-value over the episode (0 when no learning happened)
        avg_loss = np.mean(episode_losses) if episode_losses else 0
        avg_q_value = np.mean(episode_q_values) if episode_q_values else 0
        losses_history.append(avg_loss)
        avg_q_values_history.append(avg_q_value)
        
        # Update the moving-average windows
        reward_window.append(episode_reward)
        success_window.append(1 if success else 0)
        steps_window.append(num_steps)
        
        # Progress report and metrics refresh
        if episode % 5 == 0:  # print every 5 episodes
            recent_success_rate = np.mean(list(success_window)) * 100
            recent_reward = np.mean(list(reward_window))
            recent_steps = np.mean(list(steps_window))
            
            print(f"回合: {episode}/{episodes}, "
                f"奖励: {episode_reward:.2f} (平均: {recent_reward:.2f}), "
                f"步数: {num_steps} (平均: {recent_steps:.1f}), "
                f"成功: {'是' if success else '否'}, "
                f"探索率: {agent.epsilon:.3f}, "
                f"成功率: {recent_success_rate:.1f}%, "
                f"损失: {avg_loss:.4f}, "
                f"平均Q值: {avg_q_value:.4f}")
            
            # Refresh the training-metrics figure
            for ax in axs_metrics.flatten():
                ax.clear()
            
            # Reward curve
            axs_metrics[0, 0].plot(rewards_history)
            axs_metrics[0, 0].set_title('回合奖励', fontsize=12)
            axs_metrics[0, 0].set_xlabel('回合', fontsize=10)
            axs_metrics[0, 0].set_ylabel('奖励', fontsize=10)
            axs_metrics[0, 0].grid(True)
            
            # Steps curve
            axs_metrics[0, 1].plot(steps_history)
            axs_metrics[0, 1].set_title('回合步数', fontsize=12)
            axs_metrics[0, 1].set_xlabel('回合', fontsize=10)
            axs_metrics[0, 1].set_ylabel('步数', fontsize=10)
            axs_metrics[0, 1].grid(True)
            
            # Success-rate curve (moving average)
            window_size = min(100, len(success_history))
            if window_size > 0:
                success_moving_avg = [
                    np.mean(success_history[max(0, i - window_size):i+1]) * 100 
                    for i in range(len(success_history))
                ]
                axs_metrics[0, 2].plot(success_moving_avg)
                axs_metrics[0, 2].set_title(f'成功率 (移动平均, 窗口={window_size})', fontsize=12)
                axs_metrics[0, 2].set_xlabel('回合', fontsize=10)
                axs_metrics[0, 2].set_ylabel('成功率 (%)', fontsize=10)
                axs_metrics[0, 2].set_ylim(0, 105)
                axs_metrics[0, 2].grid(True)
            
            # Loss curve (zero entries, i.e. episodes with no updates, are skipped)
            axs_metrics[1, 0].plot([l for l in losses_history if l > 0])
            axs_metrics[1, 0].set_title('训练损失', fontsize=12)
            axs_metrics[1, 0].set_xlabel('回合', fontsize=10)
            axs_metrics[1, 0].set_ylabel('损失', fontsize=10)
            axs_metrics[1, 0].grid(True)
            
            # Exploration-rate curve
            axs_metrics[1, 1].plot(epsilon_history)
            axs_metrics[1, 1].set_title('探索率 (Epsilon)', fontsize=12)
            axs_metrics[1, 1].set_xlabel('回合', fontsize=10)
            axs_metrics[1, 1].set_ylabel('Epsilon', fontsize=10)
            axs_metrics[1, 1].set_ylim(0, 1.05)
            axs_metrics[1, 1].grid(True)
            
            # Mean-Q-value curve
            axs_metrics[1, 2].plot(avg_q_values_history)
            axs_metrics[1, 2].set_title('平均Q值', fontsize=12)
            axs_metrics[1, 2].set_xlabel('回合', fontsize=10)
            axs_metrics[1, 2].set_ylabel('Q值', fontsize=10)
            axs_metrics[1, 2].grid(True)
            
            fig_metrics.suptitle(f'训练指标 - 回合 {episode}/{episodes}', fontsize=14)
            plt.figure(fig_metrics.number)
            plt.tight_layout()
            plt.draw()
            plt.pause(0.01)
        
        # Linger briefly after animated episodes
        if show_visualization:
            plt.pause(0.5)
            
        # Save a checkpoint every 100 episodes (independent of the visualization cadence)
        if episode % 100 == 0:
            checkpoint_path = os.path.join("models", f"dqn_model_warehouse_ep{episode}.pth")
            agent.save_model(checkpoint_path)
            print(f"已保存检查点模型到 {checkpoint_path}")
            
            # Snapshot the current training-metrics figure
            plt.figure(fig_metrics.number)
            plt.savefig(os.path.join("results", f"training_metrics_ep{episode}.png"), dpi=300)
    
    # Interactive mode off
    plt.ioff()
    
    return agent, grid_map, rewards_history, steps_history, success_history, losses_history, epsilon_history, avg_q_values_history

def evaluate_agent(agent, env, num_episodes=5):
    """
    Evaluate a trained agent with greedy (eval-mode) action selection.

    Renders each episode live, prints per-episode statistics, saves the
    final path of every episode to ``results/eval_episode_<n>.png``, and
    prints aggregate success rate / reward / steps at the end.

    Args:
        agent: trained agent supporting ``select_action(state, eval_mode=True)``
        env: RobotEnv instance to evaluate in
        num_episodes: number of evaluation episodes
    """
    success_count = 0
    total_rewards = 0
    total_steps = 0
    
    # Set up live rendering
    plt.ion()  # interactive mode on
    fig, ax = plt.subplots(figsize=(8, 8))
    plt.show()
    
    for episode in range(num_episodes):
        # Ensure a reachable path exists between start and goal
        state = None
        while state is None:
            try:
                state = env.reset(ensure_path=True)
            except ValueError as e:
                print(f"重置环境失败: {e}, 重试...")
                continue
                
        episode_reward = 0
        episode_steps = 0
        
        # Track the robot's path for rendering
        path = [env.position]
        
        # Initial render
        ax.clear()
        robot_paths = {0: path}
        goals = {0: env.goal}
        env.grid_map.render(fig=fig, ax=ax, robot_paths=robot_paths, goals=goals)
        ax.set_title(f"评估回合 {episode+1}, 步数: {episode_steps}")
        plt.draw()
        plt.pause(0.5)
        
        while True:
            # Greedy action selection (no exploration)
            action = agent.select_action(state, eval_mode=True)
            
            # Apply the action
            next_state, reward, done, info = env.step(action)
            
            # Record the new position
            path.append(env.position)
            
            # Live rendering
            ax.clear()
            robot_paths = {0: path}
            goals = {0: env.goal}
            env.grid_map.render(fig=fig, ax=ax, robot_paths=robot_paths, goals=goals)
            ax.set_title(f"评估回合 {episode+1}, 步数: {episode_steps+1}")
            plt.draw()
            plt.pause(0.1)
            
            # Advance to the next state
            state = next_state
            episode_reward += reward
            episode_steps += 1
            
            # Stop on episode end, or at a hard cap of 2x the cell count
            if done or episode_steps >= env.grid_map.width * env.grid_map.height * 2:
                break
        
        # Per-episode statistics
        success = env.grid_map.is_done(env.position, env.goal)
        if success:
            success_count += 1
        
        total_rewards += episode_reward
        total_steps += episode_steps
        
        print(f"评估回合 {episode+1}/{num_episodes}, "
              f"奖励: {episode_reward:.2f}, "
              f"步数: {episode_steps}, "
              f"成功: {'是' if success else '否'}")
        
        # Save the final path figure for this episode
        robot_paths = {0: path}
        goals = {0: env.goal}
        ax.clear()
        env.grid_map.render(fig=fig, ax=ax, robot_paths=robot_paths, goals=goals)
        ax.set_title(f"评估回合 {episode+1}, 步数: {episode_steps}, 成功: {'是' if success else '否'}")
        plt.savefig(os.path.join("results", f"eval_episode_{episode+1}.png"))
        plt.pause(0.5)
    
    # Interactive mode off
    plt.ioff()
    plt.close()
    
    # Aggregate metrics
    success_rate = success_count / num_episodes * 100
    avg_reward = total_rewards / num_episodes
    avg_steps = total_steps / num_episodes
    
    print(f"\n评估结果:")
    print(f"成功率: {success_rate:.1f}%")
    print(f"平均奖励: {avg_reward:.2f}")
    print(f"平均步数: {avg_steps:.1f}")

def main():
    """Command-line entry point: train a DQN agent, or evaluate a saved model with --eval."""
    parser = argparse.ArgumentParser(description="简化DQN训练和评估")
    parser.add_argument('--width', type=int, default=20, help='地图宽度')
    parser.add_argument('--height', type=int, default=20, help='地图高度')
    parser.add_argument('--obstacle', type=float, default=0.15, help='障碍物比例(仅在非仓储地图时使用)')
    parser.add_argument('--episodes', type=int, default=1000, help='训练回合数')
    parser.add_argument('--max_steps', type=int, default=200, help='每回合最大步数')
    parser.add_argument('--seed', type=int, default=42, help='随机种子')
    parser.add_argument('--eval', action='store_true', help='是否只进行评估')
    parser.add_argument('--no-eval', action='store_true', help='训练后不进行评估')
    parser.add_argument('--model', type=str, default='models/dqn_model_warehouse_final.pth', help='评估时加载的模型路径')
    parser.add_argument('--warehouse', action='store_true', default=True, help='使用仓储地图')
    parser.add_argument('--no-warehouse', action='store_true', help='不使用仓储地图，使用随机障碍物')
    args = parser.parse_args()
    
    # Decide whether to use the warehouse map (--no-warehouse overrides the default)
    use_warehouse = args.warehouse and not args.no_warehouse
    
    if args.eval:
        # Evaluation mode
        if not os.path.exists(args.model):
            print(f"模型文件 {args.model} 不存在!")
            return
        
        # Build the environment
        if use_warehouse:
            grid_map, _ = create_warehouse_map(args.width, args.height, args.seed)
        else:
            grid_map = create_small_map(args.width, args.height, args.obstacle, args.seed)
            
        env = RobotEnv(grid_map)
        
        # Build the agent (same dimensions as used during training)
        state_dim = 15
        action_dim = 4
        agent = DQNAgent(
            state_dim=state_dim,
            action_dim=action_dim,
            config={'memory_size': 1000},  # only the required config entries are needed here
            use_double_dqn=True,
            use_prioritized=True
        )
        
        # Load the trained weights
        agent.load_model(args.model)
        print(f"已加载模型: {args.model}")
        
        # Evaluate the agent
        evaluate_agent(agent, env, num_episodes=5)
    else:
        # Training mode
        agent, grid_map, rewards_history, steps_history, success_history, losses_history, epsilon_history, avg_q_values_history = train_simple_dqn(
            width=args.width,
            height=args.height,
            episodes=args.episodes,
            seed=args.seed,
            use_warehouse=use_warehouse,
            max_steps=args.max_steps
        )
        
        # Build a fresh environment for post-training evaluation
        env = RobotEnv(grid_map)
        
        # Save the final model
        final_model_path = os.path.join("models", "dqn_model_warehouse_final.pth" if use_warehouse else "dqn_model_simple_final.pth")
        agent.save_model(final_model_path)
        print(f"最终模型已保存到 {final_model_path}")
        
        # Save the training curves
        # Final summary figure rendered once after training completes
        plt.figure(figsize=(20, 15))  # larger canvas for 6 subplots
        
        # 2x3 layout covering all tracked metrics
        plt.subplot(2, 3, 1)
        plt.plot(rewards_history)
        plt.title('回合奖励', fontsize=16)
        plt.xlabel('回合', fontsize=14)
        plt.ylabel('奖励', fontsize=14)
        plt.grid(True)
        plt.tick_params(axis='both', which='major', labelsize=12)
        
        plt.subplot(2, 3, 2)
        plt.plot(steps_history)
        plt.title('回合步数', fontsize=16)
        plt.xlabel('回合', fontsize=14)
        plt.ylabel('步数', fontsize=14)
        plt.grid(True)
        plt.tick_params(axis='both', which='major', labelsize=12)
        
        plt.subplot(2, 3, 3)
        window_size = min(100, len(success_history))
        if window_size > 0:
            success_moving_avg = [
                np.mean(success_history[max(0, i - window_size):i+1]) * 100 
                for i in range(len(success_history))
            ]
            plt.plot(success_moving_avg)
            plt.title(f'成功率 (移动平均, 窗口={window_size})', fontsize=16)
            plt.xlabel('回合', fontsize=14)
            plt.ylabel('成功率 (%)', fontsize=14)
            plt.ylim(0, 105)
            plt.grid(True)
            plt.tick_params(axis='both', which='major', labelsize=12)
        
        plt.subplot(2, 3, 4)
        plt.plot([l for l in losses_history if l > 0])
        plt.title('训练损失', fontsize=16)
        plt.xlabel('回合', fontsize=14)
        plt.ylabel('损失', fontsize=14)
        plt.grid(True)
        plt.tick_params(axis='both', which='major', labelsize=12)
        
        plt.subplot(2, 3, 5)
        plt.plot(epsilon_history)
        plt.title('探索率 (Epsilon)', fontsize=16)
        plt.xlabel('回合', fontsize=14)
        plt.ylabel('Epsilon', fontsize=14)
        plt.ylim(0, 1.05)
        plt.grid(True)
        plt.tick_params(axis='both', which='major', labelsize=12)
        
        plt.subplot(2, 3, 6)
        plt.plot(avg_q_values_history)
        plt.title('平均Q值', fontsize=16)
        plt.xlabel('回合', fontsize=14)
        plt.ylabel('Q值', fontsize=14)
        plt.grid(True)
        plt.tick_params(axis='both', which='major', labelsize=12)
        
        plt.tight_layout(pad=3.0)  # extra padding between subplots
        plt.savefig(os.path.join("results", "warehouse_training_curves.png" if use_warehouse else "simple_training_curves.png"), dpi=300)  # higher DPI for image quality
        plt.close()  # save only, do not display
        
        # Evaluate the trained agent (unless disabled)
        if not args.no_eval:
            print("\n开始评估训练好的智能体...")
            evaluate_agent(agent, env, num_episodes=5)

# Script entry point
if __name__ == "__main__":
    main() 