import os
import torch
import numpy as np
from tqdm import tqdm
from typing import List

from poker.game import TexasHoldem
from agents.dqn_agent import DQNAgent

def train_agents(num_agents: int = 6, 
                num_episodes: int = 10000,
                save_interval: int = 100,
                save_dir: str = 'models'):
    """Train multiple DQN agents via self-play Texas Hold'em.

    Args:
        num_agents: Number of AI agents seated at the table.
        num_episodes: Number of training episodes (hands) to play.
        save_interval: Checkpoint every agent after this many episodes.
        save_dir: Directory where model checkpoints are written.
    """
    # Ensure the checkpoint directory exists before training starts.
    os.makedirs(save_dir, exist_ok=True)

    # Create the agents and the shared game environment.
    agents = [DQNAgent(f"Agent_{i}") for i in range(num_agents)]
    env = TexasHoldem(agents)

    for episode in tqdm(range(num_episodes), desc="Training"):
        state = env.reset()
        done = False

        # Per-agent transition buffers for this episode; flushed into each
        # agent's replay memory once the hand is over.
        experiences = {i: [] for i in range(num_agents)}

        while not done:
            current_player = env.current_player_idx
            agent = agents[current_player]

            # The acting agent chooses an action for the current state.
            action_type, bet_amount = agent.act(state)

            # Advance the environment by one action. NOTE(review): the
            # reward here is whatever env.step returns for this single
            # step — presumably attributed to the acting player; confirm
            # against TexasHoldem.step.
            next_state, reward, done, _ = env.step(action_type, bet_amount)

            # Store copies so later env mutations cannot alias into the
            # replay buffer. On a terminal step there is no meaningful
            # successor state, so store a copy of the pre-action state;
            # the `done` flag tells the learner to ignore it. (This also
            # avoids the original's None-then-substitute dance and the
            # aliasing of state/next_state as the same object.)
            experiences[current_player].append((
                state.copy(),
                (action_type, bet_amount),
                reward,
                next_state.copy() if not done else state.copy(),
                done
            ))

            state = next_state

        # Flush this episode's transitions into each agent's replay
        # memory, then run one training step per agent.
        for agent_idx, agent in enumerate(agents):
            for exp_state, action, reward, exp_next_state, exp_done in experiences[agent_idx]:
                agent.remember(exp_state, action, reward, exp_next_state, exp_done)
            agent.train()

        # Periodic checkpointing, one file per agent.
        if (episode + 1) % save_interval == 0:
            for i, agent in enumerate(agents):
                save_path = os.path.join(save_dir, f'agent_{i}_episode_{episode+1}.pt')
                agent.save(save_path)

def evaluate_agents(agents: List[DQNAgent], 
                   num_games: int = 100) -> List[float]:
    """Evaluate trained agents by playing greedy (no-exploration) games.

    Args:
        agents: Trained agents to evaluate.
        num_games: Number of evaluation games to play.

    Returns:
        Average chip count per agent across all games, in agent order.
    """
    env = TexasHoldem(agents)
    total_chips = [0] * len(agents)

    for _ in tqdm(range(num_games), desc="Evaluating"):
        state = env.reset()
        done = False

        while not done:
            current_player = env.current_player_idx
            agent = agents[current_player]

            # Act greedily: temporarily disable exploration. Restore
            # epsilon in a finally block so an exception inside act()
            # cannot leave the agent permanently stuck at epsilon == 0
            # (the original restored it unconditionally after the call,
            # which leaked on error).
            old_epsilon = agent.epsilon
            agent.epsilon = 0
            try:
                action_type, bet_amount = agent.act(state)
            finally:
                agent.epsilon = old_epsilon

            state, _, done, _ = env.step(action_type, bet_amount)

        # Accumulate each agent's chip count at the end of the game.
        # NOTE(review): assumes env.reset() re-seeds chips each game so
        # these totals are per-game results — confirm against TexasHoldem.
        for i, agent in enumerate(agents):
            total_chips[i] += agent.chips

    # Mean chips per agent over all games.
    return [chips / num_games for chips in total_chips]

if __name__ == '__main__':
    # Single source of truth for the run configuration. The original
    # hard-coded 6 and 10000 in three places, so the checkpoint filename
    # ('..._episode_10000.pt') would silently break if the episode count
    # changed; compute it from the constant instead.
    num_agents = 6
    num_episodes = 10000

    # Train the agents (writes checkpoints under models/).
    train_agents(num_agents=num_agents, num_episodes=num_episodes)

    # Load the final checkpoint of every agent for evaluation.
    agents = []
    for i in range(num_agents):
        agent = DQNAgent(f"Agent_{i}")
        agent.load(f'models/agent_{i}_episode_{num_episodes}.pt')
        agents.append(agent)

    # Greedy evaluation and result report.
    avg_chips = evaluate_agents(agents)

    print("\n评估结果：")
    for i, chips in enumerate(avg_chips):
        print(f"Agent_{i} 平均筹码量: {chips:.2f}") 