import gymnasium as gym
import numpy as np
import sys
import os

# Add the project root directory to the Python path so the local
# `env` and `agents` packages resolve when this script is run directly.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from env.ball_battle_env import BallBattleEnv
from agents.dqn_agent import DQNAgent
from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_vec_env
import torch
from tqdm import tqdm
import argparse

def train_with_custom_dqn(model_path=None):
    """Train the custom DQN agent on BallBattleEnv.

    Runs up to 1000 episodes of at most 1000 steps each, checkpointing
    the agent every 100 episodes to ``models/`` and saving a final
    checkpoint when training ends.

    Args:
        model_path: Optional path to an existing checkpoint. When given
            and the file exists, the agent's weights (and epsilon) are
            loaded so training resumes from that state.
    """
    print("Training with custom DQN agent...")

    # Headless environment: no rendering during training
    env = BallBattleEnv(render_mode=None)

    # Create the agent with standard DQN hyperparameters
    agent = DQNAgent(
        action_space=env.action_space,
        observation_space=env.observation_space,
        lr=1e-3,
        gamma=0.99,
        epsilon=1.0,
        epsilon_decay=0.995,
        epsilon_min=0.01,
        buffer_size=10000,
        batch_size=32
    )

    # Resume from an existing checkpoint when one was supplied
    if model_path and os.path.exists(model_path):
        print(f"Loading existing model from {model_path} to continue training...")
        agent.load(model_path)
        print(f"Model loaded. Current epsilon: {agent.epsilon}")

    # Training parameters
    episodes = 1000
    max_steps = 1000

    # Ensure the checkpoint directory exists before any save
    os.makedirs("models", exist_ok=True)

    try:
        # Main training loop
        for episode in tqdm(range(episodes), desc="Training Episodes"):
            observation, info = env.reset()
            total_reward = 0

            for step in range(max_steps):
                # Select an action (agent handles exploration internally)
                action = agent.predict(observation)

                # Step the environment
                next_observation, reward, terminated, truncated, info = env.step(action)

                # Store the transition / update the network
                agent.learn(observation, action, reward, next_observation, terminated or truncated)

                # Advance to the next state
                observation = next_observation
                total_reward += reward

                # End the episode on termination or truncation
                if terminated or truncated:
                    break

            # Checkpoint every 100 episodes; tqdm.write avoids breaking the
            # progress bar the way a bare print() would.
            if (episode + 1) % 100 == 0:
                agent.save(f"models/custom_dqn_agent_{episode+1}.pth")
                tqdm.write(f"Episode {episode+1}, Total Reward: {total_reward:.2f}, Epsilon: {agent.epsilon:.3f}")

        # Save the final model
        agent.save("models/custom_dqn_agent_final.pth")
        print("Training completed!")
    finally:
        # Release environment resources even if training is interrupted
        env.close()

def train_with_stable_baselines3():
    """Train a DQN agent on BallBattleEnv using Stable-Baselines3.

    Builds a 4-env vectorized environment, trains an MlpPolicy DQN for
    100k timesteps with TensorBoard logging, and saves the model to
    ``models/sb3_dqn_model``.
    """
    print("Training with Stable-Baselines3 DQN...")

    # Vectorized environment: 4 parallel copies for faster data collection
    env = make_vec_env(BallBattleEnv, n_envs=4)

    try:
        # Build the DQN model with standard hyperparameters
        model = DQN(
            "MlpPolicy",
            env,
            learning_rate=1e-3,
            buffer_size=50000,
            learning_starts=1000,
            batch_size=32,
            tau=1.0,
            gamma=0.99,
            train_freq=4,
            gradient_steps=1,
            target_update_interval=1000,
            exploration_fraction=0.1,
            exploration_initial_eps=1.0,
            exploration_final_eps=0.05,
            max_grad_norm=10,
            verbose=1,
            tensorboard_log="./tensorboard_logs/"
        )

        # Train the model
        model.learn(total_timesteps=100000, log_interval=100)

        # Ensure the save directory exists, then persist the model
        os.makedirs("models", exist_ok=True)
        model.save("models/sb3_dqn_model")
        print("Training completed!")
    finally:
        # Close the vectorized env so worker resources are released
        # even if training fails partway through.
        env.close()

def main():
    """Parse command-line options and dispatch to the chosen trainer."""
    parser = argparse.ArgumentParser(description="Train Ball Battle AI")
    parser.add_argument(
        "--agent",
        choices=["custom", "sb3"],
        default="sb3",
        help="Choose agent type: custom DQN or Stable-Baselines3 (default: sb3)",
    )
    parser.add_argument(
        "--model",
        type=str,
        help="Path to existing model for continued training",
    )
    args = parser.parse_args()

    # Guard clause: anything other than "custom" falls through to SB3.
    if args.agent != "custom":
        train_with_stable_baselines3()
        return
    train_with_custom_dqn(args.model)

# Entry point: only start training when the script is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    main()