import os
import numpy as np
from tqdm import tqdm
from typing import List, Dict
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import cross_val_score

from poker.game import TexasHoldem
from poker.card import Card
from agents.dqn_agent import DQNAgent

class PokerEvaluator:
    """Texas Hold'em AI evaluator.

    Trains a table of DQN agents via self-play and periodically evaluates
    them with a greedy (epsilon = 0) policy, collecting chip, win-rate,
    action-frequency and hand-strength statistics, then plotting results.
    """

    def __init__(self, num_agents: int = 6, initial_chips: int = 1000):
        """Create the agents and the game environment.

        Args:
            num_agents: number of DQN agents seated at the table.
            initial_chips: starting chip stack for every agent.
        """
        self.num_agents = num_agents
        self.initial_chips = initial_chips
        self.agents = [DQNAgent(f"Agent_{i}", initial_chips) for i in range(num_agents)]
        self.env = TexasHoldem(self.agents)

        # Aggregated statistics; evaluate() appends one entry per run.
        self.stats = {
            'chips_history': [],  # mean final chips per agent
            'win_rates': [],      # relative profit per agent
            'action_stats': [],   # action counts per agent
            'hand_stats': [],     # hand-strength samples per agent
        }

    def train_and_evaluate(self,
                          num_training_games: int = 1000,
                          eval_interval: int = 100,
                          num_eval_games: int = 100):
        """Train the agents and evaluate them at fixed intervals.

        Args:
            num_training_games: number of self-play training games.
            eval_interval: run an evaluation every this many games.
            num_eval_games: games played per evaluation run.
        """
        # Training loop
        for game in tqdm(range(num_training_games), desc="Training"):
            self._play_game(is_training=True)

            # Periodic evaluation
            if (game + 1) % eval_interval == 0:
                self.evaluate(num_eval_games)

        # Plot the accumulated results
        self._plot_results()

    def evaluate(self, num_games: int = 100):
        """Evaluate agent performance using a purely greedy policy.

        Args:
            num_games: number of evaluation games to play.
        """
        # Remember the exploration rates so they can be restored afterwards.
        original_epsilons = [agent.epsilon for agent in self.agents]

        # Switch every agent to a pure greedy policy.
        for agent in self.agents:
            agent.epsilon = 0

        # Per-run statistics
        chips_history = []
        action_counts = {agent.name: {'fold': 0, 'check': 0, 'call': 0, 'raise': 0}
                        for agent in self.agents}
        hand_stats = {agent.name: [] for agent in self.agents}

        # Play the evaluation games.
        for _ in range(num_games):
            game_stats = self._play_game(is_training=False)

            # Record chip counts.  NOTE(review): chips appear to persist
            # across games unless env.reset() restores them — confirm.
            chips_history.append([agent.chips for agent in self.agents])

            # Record action statistics
            for agent_name, actions in game_stats['actions'].items():
                for action in actions:
                    action_counts[agent_name][action] += 1

            # Record hand-strength statistics
            for agent_name, hands in game_stats['hands'].items():
                hand_stats[agent_name].extend(hands)

        # Summary statistics
        avg_chips = np.mean(chips_history, axis=0)
        # NOTE(review): avg_chips is already a per-game mean, so dividing the
        # profit by num_games * initial_chips looks like a double
        # normalization — confirm the intended "win rate" definition.
        win_rates = [(chips - self.initial_chips) / (num_games * self.initial_chips)
                    for chips in avg_chips]

        # Persist statistics for later plotting.
        self.stats['chips_history'].append(avg_chips)
        self.stats['win_rates'].append(win_rates)
        self.stats['action_stats'].append(action_counts)
        self.stats['hand_stats'].append(hand_stats)

        # Print the evaluation summary.
        print("\n评估结果:")
        for i, agent in enumerate(self.agents):
            print(f"\n{agent.name}:")
            print(f"平均筹码: {avg_chips[i]:.2f}")
            print(f"胜率: {win_rates[i]:.2%}")
            print("动作分布:")
            # Hoist the total out of the loop and guard against agents that
            # never acted during evaluation (avoids ZeroDivisionError).
            total_actions = sum(action_counts[agent.name].values())
            for action, count in action_counts[agent.name].items():
                share = count / total_actions if total_actions else 0.0
                print(f"  {action}: {share:.2%}")

        # Restore the original exploration rates.
        for agent, epsilon in zip(self.agents, original_epsilons):
            agent.epsilon = epsilon

        # Evaluate the first agent's decision-tree model.
        self.evaluate_decision_tree(self.agents[0])

    def _play_game(self, is_training: bool = True) -> Dict:
        """Play one full game; return per-agent action and hand stats."""
        print("开始新游戏")  # debug logging
        state = self.env.reset()
        done = False

        # Per-game bookkeeping
        game_stats = {
            'actions': {agent.name: [] for agent in self.agents},
            'hands': {agent.name: [] for agent in self.agents}
        }

        round_count = 0  # turn counter, used only for progress logging
        while not done:
            round_count += 1
            if round_count % 100 == 0:  # print progress every 100 turns
                print(f"当前回合: {round_count}")

            current_player = self.agents[self.env.current_player_idx]
            print(f"当前玩家: {current_player.name}")  # debug logging

            # Record hand strength on the river (all 5 community cards out).
            # NOTE(review): this fires on every action taken during the river
            # round, so one hand may be sampled multiple times — confirm.
            if len(state['community_cards']) == 5:
                hand = current_player.evaluator.get_hand_strength(
                    current_player.hole_cards,
                    state['community_cards']
                )
                game_stats['hands'][current_player.name].append(hand)

            # Choose an action
            action_type, bet_amount = current_player.act(state)
            print(f"动作: {action_type}, 下注: {bet_amount}")  # debug logging
            game_stats['actions'][current_player.name].append(action_type)

            # Apply the action to the environment.
            next_state, reward, done, _ = self.env.step(action_type, bet_amount)

            if is_training:
                # Store the transition and train the acting agent.
                current_player.remember(state, (action_type, bet_amount),
                                     reward, next_state, done)
                current_player.train()

            state = next_state

        print("游戏结束")  # debug logging
        return game_stats

    def _plot_results(self):
        """Render the accumulated evaluation statistics to a PNG file."""
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 10))

        # 1. Chip history
        for i, agent in enumerate(self.agents):
            chips = [history[i] for history in self.stats['chips_history']]
            ax1.plot(chips, label=agent.name)
        ax1.set_title('平均筹码变化')
        ax1.set_xlabel('评估次数')
        ax1.set_ylabel('筹码量')
        ax1.legend()

        # 2. Win-rate history
        for i, agent in enumerate(self.agents):
            rates = [rates[i] for rates in self.stats['win_rates']]
            ax2.plot(rates, label=agent.name)
        ax2.set_title('胜率变化')
        ax2.set_xlabel('评估次数')
        ax2.set_ylabel('胜率')
        ax2.legend()

        # 3. Final action distribution (grouped bars, one group per agent)
        action_stats = self.stats['action_stats'][-1]
        agents = list(action_stats.keys())
        actions = list(action_stats[agents[0]].keys())
        x = np.arange(len(agents))
        width = 0.2

        for i, action in enumerate(actions):
            values = [action_stats[agent][action] for agent in agents]
            total = [sum(action_stats[agent].values()) for agent in agents]
            # Guard against agents with zero recorded actions.
            percentages = [v / t if t else 0.0 for v, t in zip(values, total)]
            ax3.bar(x + i*width, percentages, width, label=action)

        ax3.set_title('动作分布')
        ax3.set_xticks(x + width*1.5)
        ax3.set_xticklabels(agents)
        ax3.legend()

        # 4. Hand-strength distribution
        hand_stats = self.stats['hand_stats'][-1]
        for agent_name, hands in hand_stats.items():
            if hands:  # only plot agents that have data
                ax4.hist(hands, bins=20, alpha=0.5, label=agent_name)
        ax4.set_title('手牌强度分布')
        ax4.set_xlabel('手牌强度')
        ax4.set_ylabel('频率')
        ax4.legend()

        plt.tight_layout()
        plt.savefig('evaluation_results.png')
        plt.close()

    def evaluate_decision_tree(self, agent):
        """Cross-validate and visualize an agent's decision-tree strategy.

        Args:
            agent: agent exposing a ``dt_strategy`` with accumulated training
                data (``X_train``, ``y_action``), a fitted
                ``action_classifier`` and a ``feature_importance`` mapping.
        """
        dt_strategy = agent.dt_strategy

        # Training data collected by the strategy.
        X = np.array(dt_strategy.X_train)
        y_action = np.array(dt_strategy.y_action)

        # 5-fold cross-validated accuracy of the action classifier.
        action_scores = cross_val_score(
            dt_strategy.action_classifier, X, y_action,
            cv=5, scoring='accuracy'
        )

        # Feature importances computed by the strategy.
        feature_importance = dt_strategy.feature_importance

        # Confusion matrix on the training data (resubstitution estimate).
        y_pred = dt_strategy.action_classifier.predict(X)
        conf_matrix = confusion_matrix(y_action, y_pred)

        # Per-class precision/recall/F1 report.
        class_report = classification_report(
            y_action, y_pred,
            target_names=['fold', 'check', 'call', 'raise']
        )

        # Visualize everything in a single figure.
        self._plot_decision_tree_results(
            action_scores, feature_importance, conf_matrix, class_report
        )

    def _plot_decision_tree_results(self, action_scores, feature_importance,
                                  conf_matrix, class_report):
        """Render the decision-tree evaluation results to a PNG file."""
        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))

        # 1. Cross-validation scores
        ax1.boxplot(action_scores)
        ax1.set_title('动作分类器交叉验证得分')
        ax1.set_ylabel('准确率')

        # 2. Feature importance (horizontal bars)
        features = list(feature_importance.keys())
        importance = list(feature_importance.values())
        y_pos = np.arange(len(features))

        ax2.barh(y_pos, importance)
        ax2.set_yticks(y_pos)
        ax2.set_yticklabels(features)
        ax2.set_title('特征重要性')

        # 3. Confusion matrix
        sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues',
                   xticklabels=['fold', 'check', 'call', 'raise'],
                   yticklabels=['fold', 'check', 'call', 'raise'],
                   ax=ax3)
        ax3.set_title('混淆矩阵')

        # 4. Classification report rendered as monospace text
        ax4.text(0.1, 0.1, class_report,
                fontsize=10, family='monospace')
        ax4.axis('off')
        ax4.set_title('分类报告')

        plt.tight_layout()
        plt.savefig('evaluation_decision_tree.png')
        plt.close()

def main():
    """Entry point: train a six-agent table and emit evaluation artifacts."""
    PokerEvaluator(num_agents=6).train_and_evaluate(
        num_training_games=1000,
        eval_interval=100,
        num_eval_games=100,
    )

if __name__ == '__main__':
    main()