import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import time
import argparse
from environment import WumpusWorld, Action
from agent import QLearningAgent

# =================== Training configuration ===================
# Environment parameters
WORLD_SIZE = 4              # grid world size (WORLD_SIZE x WORLD_SIZE)
PIT_PROBABILITY = 0.1       # per-cell pit probability (lowered)
RANDOM_POSITIONS = False    # fixed placement of the Wumpus and the gold

# Learning parameters
LEARNING_RATE = 0.3         # learning rate (raised)
DISCOUNT_FACTOR = 0.95      # discount factor
INITIAL_EXPLORATION_RATE = 1.0  # initial exploration rate
EXPLORATION_DECAY = 0.9998  # per-episode exploration-rate decay (lowered)
MIN_EXPLORATION_RATE = 0.05 # exploration-rate floor (raised)

# Training parameters
NUM_EPISODES = 50000        # number of training episodes (increased)
MAX_STEPS_PER_EPISODE = 100 # step cap per episode
SAVE_INTERVAL = 5000        # checkpoint interval (episodes)
EVAL_INTERVAL = 1000        # evaluation interval (episodes)
EVAL_EPISODES = 100         # episodes per evaluation run

# Output parameters
OUTPUT_DIR = "output"       # output directory
FINAL_MODEL_PATH = os.path.join(OUTPUT_DIR, "wumpus_q_table_final.npy")  # final model path
RESULTS_PLOT_PATH = os.path.join(OUTPUT_DIR, "training_results.png")     # results plot path
# ====================================================

# Configure matplotlib so Chinese labels render correctly
matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # SimHei provides CJK glyphs
matplotlib.rcParams['axes.unicode_minus'] = False    # keep minus signs renderable with this font

def evaluate_agent(env, agent, num_episodes=EVAL_EPISODES, max_steps=MAX_STEPS_PER_EPISODE):
    """
    Evaluate the agent's performance with exploration disabled (greedy policy).

    Parameters:
    - env: the Wumpus environment (must expose reset(), step(action) and a `won` flag)
    - agent: the RL agent (must expose get_action(state, training=False))
    - num_episodes: number of evaluation episodes
    - max_steps: step cap per episode; without it a deterministic greedy
      policy that cycles between states would loop forever, since this
      function (unlike the training loop) previously had no bound on steps

    Returns:
    - avg_score: mean score over all evaluation episodes
    - win_rate: fraction of episodes won
    """
    wins = 0
    total_score = 0

    print(f"开始评估 {num_episodes} 回合...")
    eval_start_time = time.time()

    for i in range(num_episodes):
        state = env.reset()
        episode_score = 0

        # Bounded rollout: mirrors the per-episode step cap used in training
        # so evaluation cannot hang on a non-terminating greedy policy.
        for _ in range(max_steps):
            action = agent.get_action(state, training=False)
            next_state, reward, done, _ = env.step(action)
            episode_score += reward
            state = next_state
            if done:
                break

        total_score += episode_score
        if env.won:
            wins += 1

        # Report progress every 10 episodes
        if (i+1) % 10 == 0:
            print(f"评估进度: {i+1}/{num_episodes} 回合完成")

    eval_time = time.time() - eval_start_time
    print(f"评估完成，用时: {eval_time:.2f}秒")

    return total_score/num_episodes, wins/num_episodes

def train_agent(env, agent, num_episodes=NUM_EPISODES, max_steps=MAX_STEPS_PER_EPISODE,
                save_interval=SAVE_INTERVAL, eval_interval=EVAL_INTERVAL,
                output_dir=OUTPUT_DIR, final_model_path=FINAL_MODEL_PATH,
                num_eval_episodes=EVAL_EPISODES):
    """
    Train the agent with tabular Q-learning.

    Parameters:
    - env: Wumpus environment
    - agent: Q-learning agent
    - num_episodes: number of training episodes
    - max_steps: maximum steps per episode
    - save_interval: episodes between Q-table checkpoints
    - eval_interval: episodes between greedy-policy evaluations
    - output_dir: output directory for checkpoints
    - final_model_path: path for the final Q-table
    - num_eval_episodes: episodes per periodic evaluation (previously the
      module constant EVAL_EPISODES was always used, so callers could not
      override it; default keeps the old behavior)

    Returns:
    - scores: per-episode total reward
    - wins: per-episode win flag
    - eval_scores: average score of each periodic evaluation
    - eval_win_rates: win rate of each periodic evaluation
    - eval_episodes: episode numbers at which evaluations ran
    """
    # Make sure the output directory exists
    os.makedirs(output_dir, exist_ok=True)

    scores = []
    wins = []
    eval_scores = []
    eval_win_rates = []
    eval_episodes = []

    start_time = time.time()
    overall_start_time = time.time()

    # Overall-progress cadence (~every 5% of the run, at least every episode);
    # loop-invariant, so compute it once up front.
    progress_interval = max(1, num_episodes // 20)

    # Initial value of the overall training progress
    print(f"总训练进度: 0/{num_episodes} 回合 (0%)")

    for episode in range(1, num_episodes+1):
        state = env.reset()
        total_reward = 0

        for _ in range(max_steps):
            # Choose an action (exploration enabled during training)
            action = agent.get_action(state)

            # Apply it to the environment
            next_state, reward, done, _ = env.step(action)

            # Q-table update from the observed transition
            agent.learn(state, action, reward, next_state, done)

            total_reward += reward
            state = next_state

            if done:
                break

        # Record score and win/loss for this episode
        scores.append(total_reward)
        wins.append(env.won)

        # Periodically evaluate the greedy policy
        if episode % eval_interval == 0:
            avg_score, win_rate = evaluate_agent(env, agent, num_episodes=num_eval_episodes)
            eval_scores.append(avg_score)
            eval_win_rates.append(win_rate)
            eval_episodes.append(episode)

            print(f"\n=== 评估结果 (回合 {episode}) ===")
            print(f"平均得分: {avg_score:.2f}")
            print(f"胜率: {win_rate:.2f}")
            print("============================\n")

        # Print rolling training statistics every 100 episodes
        if episode % 100 == 0:
            avg_score = np.mean(scores[-100:])
            win_rate = np.mean(wins[-100:]) if wins else 0
            elapsed_time = time.time() - start_time

            print(f"回合: {episode}/{num_episodes}, 平均得分: {avg_score:.2f}, "
                  f"胜率: {win_rate:.2f}, 探索率: {agent.exploration_rate:.4f}, "
                  f"耗时: {elapsed_time:.2f}秒")

            # Restart the per-window timer
            start_time = time.time()

        # Report overall progress with a simple linear time estimate
        if episode % progress_interval == 0 or episode == num_episodes:
            progress = episode / num_episodes * 100
            total_elapsed = time.time() - overall_start_time
            estimated_total = (total_elapsed / episode) * num_episodes
            remaining_time = estimated_total - total_elapsed

            print(f"总训练进度: {episode}/{num_episodes} 回合 ({progress:.1f}%), "
                  f"已用时: {total_elapsed/60:.1f}分钟, 预计剩余: {remaining_time/60:.1f}分钟")

        # Save an intermediate checkpoint
        if episode % save_interval == 0:
            interim_model_path = os.path.join(output_dir, f"wumpus_q_table_ep{episode}.npy")
            agent.save_q_table(interim_model_path)
            print(f"中间模型已保存至: {interim_model_path}")

    # Save the final Q-table
    agent.save_q_table(final_model_path)

    total_training_time = time.time() - overall_start_time
    print(f"训练全部完成! 总用时: {total_training_time/60:.2f}分钟")

    return scores, wins, eval_scores, eval_win_rates, eval_episodes

def plot_training_results(scores, wins, eval_scores=None, eval_win_rates=None,
                         eval_episodes=None, window_size=100, save_path=RESULTS_PLOT_PATH):
    """Plot per-episode scores and win rates, save the figure, then show it."""

    def _smooth(values, w):
        # Moving average via convolution with a uniform kernel of width w.
        return np.convolve(values, np.ones(w) / w, mode='valid')

    n = len(scores)
    x = range(1, n + 1)

    plt.figure(figsize=(14, 12))

    # --- top panel: scores ---
    plt.subplot(2, 1, 1)
    plt.plot(x, scores, 'b-', alpha=0.3, label='训练得分')
    if n >= window_size:
        plt.plot(range(window_size, n + 1), _smooth(scores, window_size),
                 'r-', label=f'移动平均 (窗口={window_size})')
    # Overlay periodic evaluation scores when available
    if eval_scores and eval_episodes:
        plt.plot(eval_episodes, eval_scores, 'go-', label='评估得分')
    plt.title('训练过程中的得分')
    plt.xlabel('回合')
    plt.ylabel('得分')
    plt.legend()
    plt.grid(True)

    # --- bottom panel: win rate ---
    plt.subplot(2, 1, 2)
    # Cumulative win rate, built incrementally
    cumulative, won_so_far = [], 0
    for count, won in enumerate(wins, start=1):
        won_so_far += won
        cumulative.append(won_so_far / count)
    plt.plot(x, cumulative, 'b-', label='累积胜率')

    # Windowed (moving-average) win rate
    if len(wins) >= window_size:
        plt.plot(range(window_size, len(wins) + 1),
                 _smooth(np.array(wins, dtype=float), window_size),
                 'r-', label=f'移动平均胜率 (窗口={window_size})')

    # Overlay periodic evaluation win rates when available
    if eval_win_rates and eval_episodes:
        plt.plot(eval_episodes, eval_win_rates, 'go-', label='评估胜率')

    plt.title('训练过程中的胜率')
    plt.xlabel('回合')
    plt.ylabel('胜率')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(save_path)
    plt.show()

def parse_arguments():
    """Build the CLI parser and return the parsed training options."""
    parser = argparse.ArgumentParser(description='训练Wumpus世界强化学习代理')

    # One spec per flag: (option strings, add_argument keyword arguments).
    # Order matters only for --help output, so it mirrors the original.
    option_specs = [
        (['--episodes'], dict(type=int, default=NUM_EPISODES,
                              help=f'训练回合数 (默认: {NUM_EPISODES})')),
        (['--max-steps'], dict(type=int, default=MAX_STEPS_PER_EPISODE,
                               help=f'每回合最大步数 (默认: {MAX_STEPS_PER_EPISODE})')),
        (['--learning-rate'], dict(type=float, default=LEARNING_RATE,
                                   help=f'学习率 (默认: {LEARNING_RATE})')),
        (['--discount'], dict(type=float, default=DISCOUNT_FACTOR,
                              help=f'折扣因子 (默认: {DISCOUNT_FACTOR})')),
        (['--exploration'], dict(type=float, default=INITIAL_EXPLORATION_RATE,
                                 help=f'初始探索率 (默认: {INITIAL_EXPLORATION_RATE})')),
        (['--exploration-decay'], dict(type=float, default=EXPLORATION_DECAY,
                                       help=f'探索率衰减 (默认: {EXPLORATION_DECAY})')),
        (['--min-exploration'], dict(type=float, default=MIN_EXPLORATION_RATE,
                                     help=f'最小探索率 (默认: {MIN_EXPLORATION_RATE})')),
        (['--pit-prob'], dict(type=float, default=PIT_PROBABILITY,
                              help=f'陷阱概率 (默认: {PIT_PROBABILITY})')),
        (['--random'], dict(action='store_true',
                            help='使用随机位置 (默认: False)')),
        (['--output-dir'], dict(type=str, default=OUTPUT_DIR,
                                help=f'输出目录 (默认: {OUTPUT_DIR})')),
        (['--eval-episodes'], dict(type=int, default=EVAL_EPISODES,
                                   help=f'每次评估的回合数 (默认: {EVAL_EPISODES})')),
        (['--eval-interval'], dict(type=int, default=EVAL_INTERVAL,
                                   help=f'评估间隔回合数 (默认: {EVAL_INTERVAL})')),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)

    return parser.parse_args()

def main():
    """Entry point: parse CLI options, train the agent, plot results, evaluate."""
    args = parse_arguments()

    # Prepare output locations
    os.makedirs(args.output_dir, exist_ok=True)
    model_path = os.path.join(args.output_dir, "wumpus_q_table_final.npy")
    plot_path = os.path.join(args.output_dir, "training_results.png")

    # Echo the effective configuration
    print("======== 训练参数 ========")
    for line in (
        f"回合数: {args.episodes}",
        f"每回合最大步数: {args.max_steps}",
        f"学习率: {args.learning_rate}",
        f"折扣因子: {args.discount}",
        f"初始探索率: {args.exploration}",
        f"探索率衰减: {args.exploration_decay}",
        f"最小探索率: {args.min_exploration}",
        f"陷阱概率: {args.pit_prob}",
        f"随机位置: {args.random}",
        f"评估间隔: {args.eval_interval}",
        f"评估回合数: {args.eval_episodes}",
        f"输出目录: {args.output_dir}",
    ):
        print(line)
    print("=========================")

    # Build the environment
    env = WumpusWorld(size=WORLD_SIZE, pit_prob=args.pit_prob, random_positions=args.random)

    # State factors: (x, y, has gold, has arrow, Wumpus alive); actions from the Action enum
    state_size = WORLD_SIZE * WORLD_SIZE * 2 * 2 * 2
    action_size = len(Action)

    # Build the tabular Q-learning agent
    agent = QLearningAgent(
        state_size=state_size,
        action_size=action_size,
        learning_rate=args.learning_rate,
        discount_factor=args.discount,
        exploration_rate=args.exploration,
        exploration_decay=args.exploration_decay,
        min_exploration_rate=args.min_exploration
    )

    # Run training
    print(f"开始训练 {args.episodes} 回合...")
    t0 = time.time()
    scores, wins, eval_scores, eval_win_rates, eval_episodes = train_agent(
        env=env,
        agent=agent,
        num_episodes=args.episodes,
        max_steps=args.max_steps,
        save_interval=SAVE_INTERVAL,
        eval_interval=args.eval_interval,
        output_dir=args.output_dir,
        final_model_path=model_path
    )

    print(f"训练完成! 总用时: {time.time() - t0:.2f}秒")

    # Visualize the training run
    plot_training_results(
        scores, wins,
        eval_scores, eval_win_rates, eval_episodes,
        save_path=plot_path
    )

    # Final, larger greedy evaluation
    print("\n执行最终评估...")
    final_score, final_win_rate = evaluate_agent(env, agent, num_episodes=500)
    print(f"最终评估结果 (500回合):")
    print(f"平均得分: {final_score:.2f}")
    print(f"胜率: {final_win_rate:.2f}")

    print(f"\n最终模型已保存至: {model_path}")
    print(f"训练结果图表已保存至: {plot_path}")

if __name__ == "__main__":
    main()