"""
AI训练脚本 - 使用stable-baselines3训练AI模型
"""
import os
import time

import numpy as np
import torch
from stable_baselines3 import PPO, DQN, A2C
from stable_baselines3.common.callbacks import BaseCallback, CheckpointCallback
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv

from ball_battle_env import MultiAgentBallBattleEnv
from game_ai import GameAI, RandomAgent, TrainedAgent
from settings import *


class TrainingCallback(BaseCallback):
    """Callback that records per-episode rewards/lengths and logs progress."""

    def __init__(self, verbose=1):
        super().__init__(verbose)
        # Per-episode statistics accumulated over the whole run.
        self.episode_rewards = []
        self.episode_lengths = []
        self.episode_count = 0

    def _on_step(self) -> bool:
        """Called by SB3 after every environment step; returns True to continue training."""
        dones = self.locals.get("dones", [False])
        # Only the first (single) vectorized sub-env is tracked.
        if dones[0]:
            infos = self.locals.get("infos", [{}])
            # The "episode" stats dict is injected by the Monitor wrapper;
            # fall back to 0 when it is absent.
            episode_stats = infos[0].get("episode", {})
            self.episode_rewards.append(episode_stats.get("r", 0))
            self.episode_lengths.append(episode_stats.get("l", 0))
            self.episode_count += 1

            # Report a rolling average over the last 10 episodes.
            if self.episode_count % 10 == 0:
                avg_reward = np.mean(self.episode_rewards[-10:])
                avg_length = np.mean(self.episode_lengths[-10:])
                print(f"回合 {self.episode_count}: 平均奖励 {avg_reward:.2f}, 平均步数 {avg_length:.2f}")

        return True


def train_model(algorithm="PPO", total_timesteps=100000, model_save_path="models/", log_dir="logs/"):
    """Train an AI model with stable-baselines3.

    Args:
        algorithm: "PPO", "DQN" or "A2C" (case-insensitive);
            any other value falls back to PPO with a warning.
        total_timesteps: total number of environment steps to train for.
        model_save_path: directory for periodic checkpoints and the final model.
        log_dir: directory for tensorboard logs.

    Returns:
        The trained stable-baselines3 model.
    """
    os.makedirs(model_save_path, exist_ok=True)
    os.makedirs(log_dir, exist_ok=True)

    # FIX: wrap the env in Monitor so the "episode" reward/length stats appear
    # in the info dicts. TrainingCallback reads info["episode"]; without
    # Monitor it would silently record 0 for every episode.
    env = DummyVecEnv([lambda: Monitor(MultiAgentBallBattleEnv(render_mode=None))])

    # Dispatch table instead of an if/elif chain; unknown names fall back to PPO.
    algo_classes = {"PPO": PPO, "DQN": DQN, "A2C": A2C}
    algo_cls = algo_classes.get(algorithm.upper())
    if algo_cls is None:
        print(f"不支持的算法: {algorithm}，使用默认的PPO")
        algo_cls = PPO
    model = algo_cls("MlpPolicy", env, verbose=1, tensorboard_log=log_dir)

    # Save a checkpoint every 10k steps under model_save_path.
    checkpoint_callback = CheckpointCallback(
        save_freq=10000,
        save_path=model_save_path,
        name_prefix=f"{algorithm.lower()}_model"
    )
    training_callback = TrainingCallback()

    print(f"开始使用 {algorithm} 算法训练模型，总步数: {total_timesteps}")
    start_time = time.time()

    model.learn(
        total_timesteps=total_timesteps,
        callback=[checkpoint_callback, training_callback]
    )

    print(f"训练完成，耗时: {time.time() - start_time:.2f} 秒")

    # Persist the final model (SB3 appends ".zip" automatically).
    final_model_path = os.path.join(model_save_path, f"{algorithm.lower()}_final_model")
    model.save(final_model_path)
    print(f"模型已保存到: {final_model_path}")

    return model


def test_model(model_path, num_episodes=5):
    """Load a saved model and play it in a rendered environment.

    Args:
        model_path: path to a saved SB3 model (".zip" suffix optional);
            the algorithm is inferred from the file name.
        num_episodes: number of episodes to play.
    """
    env = MultiAgentBallBattleEnv(render_mode="human")

    # SB3's load() appends ".zip" itself, so strip it if present.
    if model_path.endswith(".zip"):
        model_path = model_path[:-4]

    # Infer the algorithm from the filename; fall back to PPO.
    lowered = model_path.lower()
    if "ppo" in lowered:
        model = PPO.load(model_path)
    elif "dqn" in lowered:
        model = DQN.load(model_path)
    elif "a2c" in lowered:
        model = A2C.load(model_path)
    else:
        print(f"无法确定模型算法，尝试使用PPO加载: {model_path}")
        model = PPO.load(model_path)

    print(f"开始测试模型: {model_path}")

    for episode in range(num_episodes):
        obs = env.reset()
        done = False
        total_reward = 0
        step_count = 0

        while not done:
            # Deterministic policy for reproducible evaluation runs.
            action, _ = model.predict(obs, deterministic=True)

            obs, reward, done, info = env.step(action)
            total_reward += reward
            step_count += 1

            env.render()

            # FIX: throttle playback to ~20 steps/sec. The original called
            # pygame.time.delay(50), but pygame is never imported in this
            # module (NameError unless `settings` happens to re-export it);
            # time.sleep(0.05) is equivalent and uses an existing import.
            time.sleep(0.05)

        print(f"回合 {episode + 1}: 奖励 {total_reward:.2f}, 步数 {step_count}")

    env.close()

def compare_random_vs_trained(model_path, num_episodes=10):
    """Compare a random policy against a trained model on the same environment.

    Args:
        model_path: path to a saved SB3 model (".zip" suffix optional);
            the algorithm is inferred from the file name.
        num_episodes: number of episodes to run for each policy.
    """
    print("开始比较随机AI和训练后AI的性能...")

    env = MultiAgentBallBattleEnv(render_mode=None)

    # SB3's load() appends ".zip" itself, so strip it if present.
    if model_path.endswith(".zip"):
        model_path = model_path[:-4]

    # Infer the algorithm from the filename; fall back to PPO.
    lowered = model_path.lower()
    if "ppo" in lowered:
        model = PPO.load(model_path)
    elif "dqn" in lowered:
        model = DQN.load(model_path)
    elif "a2c" in lowered:
        model = A2C.load(model_path)
    else:
        print(f"无法确定模型算法，尝试使用PPO加载: {model_path}")
        model = PPO.load(model_path)

    print("测试随机AI...")
    random_rewards, random_lengths = _run_policy_episodes(
        env, lambda obs: env.action_space.sample(), num_episodes)

    print("测试训练后的AI...")
    trained_rewards, trained_lengths = _run_policy_episodes(
        env, lambda obs: model.predict(obs, deterministic=True)[0], num_episodes)

    # Compute the means once instead of repeating np.mean in every branch.
    random_mean = float(np.mean(random_rewards))
    trained_mean = float(np.mean(trained_rewards))

    print("\n比较结果:")
    print(f"随机AI - 平均奖励: {random_mean:.2f}, 平均步数: {np.mean(random_lengths):.2f}")
    print(f"训练AI - 平均奖励: {trained_mean:.2f}, 平均步数: {np.mean(trained_lengths):.2f}")

    # FIX: guard against ZeroDivisionError when the random baseline mean is
    # exactly 0 (the original divided by abs(mean) unconditionally).
    baseline = abs(random_mean)
    if baseline == 0:
        print(f"随机AI平均奖励为0，无法计算百分比变化 (奖励差值: {trained_mean - random_mean:.2f})")
    elif trained_mean > random_mean:
        improvement = (trained_mean - random_mean) / baseline * 100
        print(f"训练后的AI性能提升了 {improvement:.2f}%")
    else:
        decline = (random_mean - trained_mean) / baseline * 100
        print(f"训练后的AI性能下降了 {decline:.2f}%")

    env.close()


def _run_policy_episodes(env, choose_action, num_episodes):
    """Run num_episodes with choose_action(obs) -> action; returns (rewards, lengths)."""
    rewards, lengths = [], []
    for _ in range(num_episodes):
        obs = env.reset()
        done = False
        total_reward = 0
        step_count = 0

        while not done:
            obs, reward, done, info = env.step(choose_action(obs))
            total_reward += reward
            step_count += 1

        rewards.append(total_reward)
        lengths.append(step_count)
    return rewards, lengths


if __name__ == '__main__':
    """程序入口点"""
    import argparse

    # Command-line interface: choose a mode and (for test/compare) a model path.
    parser = argparse.ArgumentParser(description="球球大战AI训练脚本")
    parser.add_argument("--mode", type=str, default="train", choices=["train", "test", "compare"],
                        help="运行模式: train(训练), test(测试), compare(比较)")
    parser.add_argument("--algorithm", type=str, default="PPO", choices=["PPO", "DQN", "A2C"],
                        help="训练算法")
    parser.add_argument("--timesteps", type=int, default=100000,
                        help="训练步数")
    parser.add_argument("--model", type=str, default=None,
                        help="模型路径(用于测试和比较)")
    parser.add_argument("--episodes", type=int, default=5,
                        help="测试回合数")

    args = parser.parse_args()
    mode = args.mode

    if mode == "train":
        train_model(args.algorithm, args.timesteps)
    elif mode == "test":
        if args.model:
            test_model(args.model, args.episodes)
        else:
            print("测试模式需要指定模型路径 (--model)")
    elif mode == "compare":
        if args.model:
            compare_random_vs_trained(args.model, args.episodes)
        else:
            print("比较模式需要指定模型路径 (--model)")