import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from collections import deque
from typing import List, Dict, Tuple
import torch.nn.functional as F
import os
import torch.cuda.amp as amp  # automatic mixed precision (AMP) support

from .models import PokerNet
from .strategy import GTOStrategy, MESStrategy, DecisionTreeStrategy
from poker.player import AIPlayer
from poker.card import Card
from poker.hand_evaluator import evaluate_hand
from poker.evaluator import HandEvaluator
from poker.pot import PotManager

# Device selection and deterministic seeding (applied at import time).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.manual_seed(42)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(42)
    # Trade cuDNN autotuning for reproducible kernel selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
print(f"Using device: {device}")

class DQNAgent(AIPlayer):
    """Poker AI agent driven by a DQN, blended with GTO/MES/decision-tree strategies.

    Action selection mixes four strategy components via fixed weights
    (``strategy_weights``); the DQN part is trained from an experience-replay
    buffer with a periodically-synced target network and automatic mixed
    precision (AMP).
    """
    
    def __init__(self, name: str, initial_chips: int = 1000):
        super().__init__(name, initial_chips)
        
        # Remember the starting stack so per-game profit can be computed.
        self.initial_chips = initial_chips
        
        # Neural networks: online (policy) net and target net, kept in sync
        # every few training steps (see train()).
        self.input_dim = self._get_state_dim()
        self.policy_net = PokerNet(self.input_dim).to(device)
        self.target_net = PokerNet(self.input_dim).to(device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        
        # Optimizer and LR scheduler; the scheduler steps on average reward
        # (mode='max'), halving the LR after `patience` stagnant evaluations.
        self.optimizer = torch.optim.Adam(self.policy_net.parameters(), lr=0.001)
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, mode='max', factor=0.5, patience=5, verbose=True
        )
        
        # Gradient scaler for mixed-precision training.
        self.scaler = amp.GradScaler()
        
        # Experience replay buffers.
        self.memory = deque(maxlen=100000)  # main replay buffer
        self.priority_memory = deque(maxlen=10000)  # high-|reward| experiences only
        
        # Training hyper-parameters.
        self.batch_size = 128
        self.gamma = 0.99  # discount factor
        self.epsilon = 1.0  # epsilon-greedy exploration rate
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.target_update = 10  # base interval (training steps) for target-net sync
        self.training_step = 0
        self.update_frequency = 4  # network update cadence (steps)
        
        # Auxiliary strategy components blended with the DQN output in act().
        self.gto_strategy = GTOStrategy()
        self.mes_strategy = MESStrategy()
        self.dt_strategy = DecisionTreeStrategy()
        
        # Fixed blend weights used when scoring candidate actions.
        self.strategy_weights = {
            'dqn': 0.4,
            'gto': 0.2,
            'mes': 0.2,
            'dt': 0.2
        }
        
        # Hand-strength / win-probability evaluator.
        self.evaluator = HandEvaluator()
        
        # Score tracking.
        self.total_reward = 0  # cumulative adjusted reward across all games
        self.current_reward = 0  # adjusted reward accumulated in the current game
        self.total_profit = 0  # cumulative chip profit
        self.profit_history = []  # per-game profit
        self.win_history = []  # 1 = won (profit > 0), 0 = otherwise
        self.games_played = 0  # number of completed games
        
        # Training metrics history (appended to throughout training).
        self.train_metrics = {
            'loss': [],
            'reward': [],
            'q_values': [],
            'learning_rates': []
        }
        
        # Minimum raise amount (set to the big-blind value).
        self.min_raise = 20
    
    def _get_state_dim(self) -> int:
        """Return the length of the encoded state vector."""
        # 2 hole cards, each one-hot encoded as 4 suits + 13 ranks = 17 dims.
        hole_cards_dim = 2 * 17
        # Up to 5 community cards, same 17-dim encoding (zero-padded).
        community_cards_dim = 5 * 17
        # Scalar game info: chips, bets, pot, min raise, position, flags, table averages.
        other_info_dim = 10
        # Hand evaluation: strength, win prob, pot equity, improvement potential, stage.
        hand_info_dim = 5
        return hole_cards_dim + community_cards_dim + other_info_dim + hand_info_dim
    
    def _encode_card(self, card: Card) -> np.ndarray:
        """One-hot encode a card as a 17-dim vector (4 suit dims + 13 rank dims)."""
        suit_onehot = np.zeros(4)
        rank_onehot = np.zeros(13)
        
        suit_onehot[Card.SUITS.index(card.suit)] = 1
        rank_onehot[Card.RANKS.index(card.rank)] = 1
        
        return np.concatenate([suit_onehot, rank_onehot])
    
    def _encode_state(self, state: dict) -> torch.Tensor:
        """Encode the current game state as a (1, input_dim) float tensor on `device`."""
        # Encode hole cards.
        state_vec = []
        for card in self.hole_cards:
            state_vec.extend(self._encode_card(card))
        
        # Encode community cards, zero-padded to 5 slots.
        community_cards = state['community_cards']
        for i in range(5):
            if i < len(community_cards):
                state_vec.extend(self._encode_card(community_cards[i]))
            else:
                state_vec.extend(np.zeros(17))  # pad empty slots with zeros
        
        # Scalar info about this player (normalised by 1000 chips / 6 seats).
        state_vec.extend([
            self.chips / 1000,  # normalised stack size
            self.current_bet / 1000,
            state['total_pot'] / 1000,
            state['min_raise'] / 1000,
            self.position / 6,  # assumes at most 6 players — TODO confirm
            int(self.is_folded),
            int(self.is_all_in)
        ])
        
        # Aggregate info about the table.
        players_info = state['players']
        total_players = len(players_info)
        
        avg_chips = sum(p['chips'] for p in players_info) / total_players
        avg_bet = sum(p['current_bet'] for p in players_info) / total_players
        active_players = sum(1 for p in players_info if not p['is_folded'])
        
        state_vec.extend([
            avg_chips / 1000,
            avg_bet / 1000,
            active_players / total_players
        ])
        
        # Hand evaluation features.
        hand_strength = self.evaluator.get_hand_strength(self.hole_cards, community_cards)
        win_prob = self.evaluator.calculate_win_probability(
            self.hole_cards, community_cards, 
            num_opponents=active_players-1, 
            num_simulations=100
        )
        
        # Pot equity relative to our own stack.
        pot_equity = win_prob * state['total_pot'] / self.chips if self.chips > 0 else 0
        
        # Probability mass of ending with a hand stronger than the current made hand.
        hand_probs = self.evaluator.calculate_hand_probabilities(
            self.hole_cards, community_cards, num_simulations=100
        )
        improvement_potential = sum(
            prob for hand_type, prob in hand_probs.items()
            if _get_hand_rank(hand_type) > _get_current_hand_rank(self.hole_cards, community_cards)
        )
        
        state_vec.extend([
            hand_strength,
            win_prob,
            pot_equity,
            improvement_potential,
            len(community_cards) / 5  # game progress (0 = preflop .. 1 = river)
        ])
        
        return torch.FloatTensor(state_vec).unsqueeze(0).to(device)
    
    def act(self, state: dict) -> Tuple[str, int]:
        """Choose an action by blending DQN, GTO, MES and decision-tree suggestions."""
        if random.random() < self.epsilon:
            # Exploration: pick a random legal action.
            return self._explore(state)
        
        # Collect each strategy's suggested action.
        dqn_action = self._get_dqn_action(state)
        gto_action = self.gto_strategy.get_action(state, self.hole_cards)
        mes_action = self.mes_strategy.get_action(state, self.hole_cards)
        dt_action = self.dt_strategy.get_action(state, self.hole_cards)  # decision-tree suggestion
        
        # Candidate actions keyed by strategy name.
        actions = {
            'dqn': dqn_action,
            'gto': gto_action,
            'mes': mes_action,
            'dt': dt_action  # decision-tree suggestion
        }
        
        # Estimate the weighted expected value of each proposed action.
        # NOTE(review): the action tuple is the dict key, so when two strategies
        # propose the exact same action the later weighted value OVERWRITES the
        # earlier one instead of accumulating — confirm whether summing was intended.
        action_values = {}
        for strategy_name, action in actions.items():
            action_type, bet_amount = action
            value = self._evaluate_action(state, action_type, bet_amount)
            action_values[action] = value * self.strategy_weights[strategy_name]
        
        # Pick the highest-valued action.
        best_action = max(action_values.items(), key=lambda x: x[1])[0]
        return best_action
    
    def _get_dqn_action(self, state: dict) -> Tuple[str, int]:
        """Return the policy network's suggested (action_type, bet_amount)."""
        with torch.no_grad():
            state_tensor = self._encode_state(state)
            action_probs, value, bet_amount = self.policy_net(state_tensor)
            action_idx = torch.argmax(action_probs).item()
            actions = ['fold', 'check', 'call', 'raise']
            action_type = actions[action_idx]
            
            if action_type == 'raise':
                # NOTE(review): _calculate_raise_amount is not defined in this class;
                # presumably inherited from AIPlayer or defined elsewhere — confirm.
                bet_amount = self._calculate_raise_amount(state, value.item())
            elif action_type == 'call':
                bet_amount = max(p['current_bet'] for p in state['players']) - self.current_bet
            else:
                bet_amount = 0
                
            return action_type, bet_amount
    
    def _evaluate_action(self, state: dict, action_type: str, bet_amount: int) -> float:
        """Estimate the expected value of taking `action_type` for `bet_amount`."""
        # Pot equity from the evaluator.
        pot_equity = self.evaluator.calculate_pot_equity(
            self.hole_cards,
            state['community_cards'],
            state['total_pot'],
            len([p for p in state['players'] if not p['is_folded']])-1
        )
        
        # Risk-adjusted value per action type.
        if action_type == 'fold':
            return -state['total_pot'] * 0.1  # folding is occasionally optimal; small penalty only
        elif action_type == 'call':
            return pot_equity - bet_amount
        elif action_type == 'raise':
            # Raising also earns immediate value when opponents fold.
            fold_equity = self._calculate_fold_equity(state, bet_amount)
            return pot_equity + fold_equity - bet_amount
        else:  # check
            return pot_equity
    
    def _calculate_fold_equity(self, state: dict, bet_amount: int) -> float:
        """Expected pot share gained from opponents folding to a raise of `bet_amount`."""
        active_opponents = len([p for p in state['players'] if not p['is_folded']]) - 1
        if active_opponents == 0:
            return 0
            
        # Crude fold-probability estimate, capped at 0.8.
        fold_prob = min(0.8, bet_amount / (state['total_pot'] + bet_amount))
        return state['total_pot'] * fold_prob
    
    def remember(self, state: dict, action: tuple, reward: float, 
                next_state: dict, done: bool):
        """Store a transition, update reward stats, and trigger training at game end."""
        # Shape the raw chip reward into a decision-quality signal.
        adjusted_reward = self._adjust_reward(state, action, reward, next_state)
        
        # Append to the main replay buffer.
        self.memory.append((state, action, adjusted_reward, next_state, done))
        
        # Keep only high-magnitude experiences in the priority buffer.
        if abs(adjusted_reward) > 1.0:
            self.priority_memory.append((state, action, adjusted_reward, next_state, done))
        
        # Update reward statistics.
        self.current_reward += adjusted_reward
        self.total_reward += adjusted_reward
        self.train_metrics['reward'].append(adjusted_reward)
        
        if done:
            profit = self.chips - self.initial_chips
            self.total_profit += profit
            self.profit_history.append(profit)
            self.win_history.append(1 if profit > 0 else 0)
            self.games_played += 1
            self.current_reward = 0
            
            # Train only once enough samples have accumulated.
            if len(self.memory) >= self.batch_size * 4:
                self.train()
    
    def _adjust_reward(self, state: dict, action: tuple, reward: float, next_state: dict) -> float:
        """Shape the raw chip reward to better reflect decision quality."""
        action_type, bet_amount = action
        
        # 1. Primary signal: chip change relative to the pot size.
        R_chips = reward / (state['total_pot'] + 1e-8)  # guard against division by zero
        
        # 2. Auxiliary signal: win probability weighted by the chosen action.
        win_prob = self.evaluator.calculate_win_probability(
            self.hole_cards, state['community_cards'],
            num_opponents=len(state['players'])-1
        )
        
        # Per-action weights.
        action_weights = {
            'fold': 0.8,
            'check': 1.0,
            'call': 1.0,
            'raise': 1.2
        }
        R_win_rate = win_prob * action_weights[action_type]
        
        # 3. Long-term signal: chip-count ranking among all players.
        chips_list = [p['chips'] for p in state['players']]
        # Use this agent's own stack for the ranking.
        my_chips = self.chips
        rank = sum(1 for x in chips_list if x > my_chips)
        R_ranking = (len(chips_list) - rank) / len(chips_list)
        
        # Mixing weights for the three signals.
        w1, w2, w3 = 0.6, 0.3, 0.1
        
        # Weighted combination.
        adjusted_reward = (
            w1 * R_chips +
            w2 * R_win_rate +
            w3 * R_ranking
        )
        
        # Special-case penalties.
        if action_type == 'fold' and win_prob > 0.7:
            # Penalise folding a strong hand.
            adjusted_reward *= 0.5
        elif action_type == 'raise' and win_prob < 0.2 and bet_amount > state['min_raise']:
            # Penalise large raises with a weak hand.
            adjusted_reward *= 0.7
        elif action_type == 'call' and win_prob > 0.8:
            # Penalise merely calling with a very strong hand.
            adjusted_reward *= 0.8
            
        return adjusted_reward
    
    def _encode_state_batch(self, states: List[dict]) -> torch.Tensor:
        """Encode a list of stored game states into a (batch, input_dim) tensor.

        NOTE(review): every state in the batch is encoded with the agent's
        CURRENT self.hole_cards, not the hole cards held when the experience
        was recorded — confirm this is intended for replayed transitions.
        """
        batch_size = len(states)
        # Pre-allocate the output array.
        encoded_states = np.zeros((batch_size, self.input_dim))
        
        # Encode each state row by row.
        for i, state in enumerate(states):
            if state is None:  # leave an all-zero row for missing states
                continue
                
            # Encode hole cards.
            hole_cards = self.hole_cards
            start_idx = 0
            for card in hole_cards:
                card_vec = self._encode_card(card)
                encoded_states[i, start_idx:start_idx + 17] = card_vec
                start_idx += 17
            
            # Encode community cards, zero-padded to 5 slots.
            community_cards = state.get('community_cards', [])
            for j in range(5):
                if j < len(community_cards):
                    card_vec = self._encode_card(community_cards[j])
                else:
                    card_vec = np.zeros(17)
                encoded_states[i, start_idx:start_idx + 17] = card_vec
                start_idx += 17
            
            # Scalar game info.
            # Locate this agent's entry in the stored player list.
            current_player = None
            for idx, player in enumerate(state['players']):
                if player.get('name') == self.name:
                    current_player = player
                    break
            
            if current_player is None:  # fall back to the first player if not found
                current_player = state['players'][0]
            
            # Seat position (0 if absent from the stored state).
            position = state.get('position', 0)
            
            encoded_states[i, start_idx:start_idx + 10] = [
                current_player['chips'] / 1000,
                current_player.get('current_bet', 0) / 1000,
                state.get('total_pot', 0) / 1000,
                state.get('min_raise', 0) / 1000,
                position / 6,
                int(current_player.get('is_folded', False)),
                int(current_player.get('is_all_in', False)),
                np.mean([p['chips'] for p in state['players']]) / 1000,
                np.mean([p.get('current_bet', 0) for p in state['players']]) / 1000,
                sum(1 for p in state['players'] if not p.get('is_folded', False)) / len(state['players'])
            ]
            start_idx += 10
            
            # Hand evaluation features.
            hand_strength = self.evaluator.get_hand_strength(hole_cards, community_cards)
            win_prob = self.evaluator.calculate_win_probability(
                hole_cards, community_cards, 
                num_opponents=sum(1 for p in state['players'] if not p.get('is_folded', False))-1
            )
            pot_equity = win_prob * state.get('total_pot', 0) / (current_player['chips'] + 1e-8)
            
            encoded_states[i, start_idx:] = [
                hand_strength,
                win_prob,
                pot_equity,
                len(community_cards) / 5,
                state.get('current_round', 0) / 4
            ]
            
        # Convert to tensor and move to the target device.
        return torch.FloatTensor(encoded_states).to(device)
    
    def train(self):
        """Run several mini-batch DQN updates sampled from replay memory."""
        if len(self.memory) < self.batch_size:
            return
            
        # Process several batches per call to amortise sampling overhead.
        num_batches = 4
        for _ in range(num_batches):
            # Uniform sampling from the main replay buffer.
            batch = random.sample(self.memory, self.batch_size)
            
            # Unzip the batch into parallel lists.
            states, actions, rewards, next_states, dones = map(list, zip(*batch))
            
            # Batch-encode current and next states.
            states_tensor = self._encode_state_batch(states)
            next_states_tensor = self._encode_state_batch(next_states)
            
            # Move the remaining per-transition data to the device.
            actions_tensor = torch.tensor([['fold', 'check', 'call', 'raise'].index(a[0])
                                         for a in actions], device=device)
            rewards_tensor = torch.tensor(rewards, dtype=torch.float32, device=device)
            dones_tensor = torch.tensor(dones, dtype=torch.float32, device=device)
            bet_amounts_tensor = torch.tensor([a[1] for a in actions], 
                                            dtype=torch.float32, device=device)
            
            # Mixed-precision forward pass and loss computation.
            with amp.autocast():
                # Q-values for the actions actually taken.
                current_q_values, _, current_bet_preds = self.policy_net(states_tensor)
                current_q_values = current_q_values.gather(1, actions_tensor.unsqueeze(1))
                
                # Bootstrapped targets from the frozen target network.
                with torch.no_grad():
                    next_q_values, _, _ = self.target_net(next_states_tensor)
                    next_q_values = next_q_values.max(1)[0]
                    target_q_values = rewards_tensor + (1 - dones_tensor) * self.gamma * next_q_values
                    
                # Huber loss on Q-values plus an auxiliary bet-size regression loss.
                q_loss = F.smooth_l1_loss(current_q_values.squeeze(), target_q_values)
                bet_loss = F.smooth_l1_loss(current_bet_preds.squeeze(), bet_amounts_tensor)
                total_loss = q_loss + 0.5 * bet_loss
            
            # Optimiser step via the AMP gradient scaler.
            self.optimizer.zero_grad(set_to_none=True)
            self.scaler.scale(total_loss).backward()
            self.scaler.step(self.optimizer)
            self.scaler.update()
            
            # Record training metrics.
            self.train_metrics['loss'].append(total_loss.item())
            self.train_metrics['q_values'].append(current_q_values.mean().item())
            
            # Advance the global step counter.
            self.training_step += 1
            
            # Ensure the GPU work for this batch has finished.
            if torch.cuda.is_available():
                torch.cuda.synchronize()
            
        # Sync the target network at a reduced cadence.
        if self.training_step % (self.target_update * 2) == 0:
            self.target_net.load_state_dict(self.policy_net.state_dict())
        
        # Periodically adapt the learning rate and decay exploration.
        if self.training_step % 2000 == 0:
            avg_reward = np.mean(self.train_metrics['reward'][-2000:])
            self.scheduler.step(avg_reward)
            self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)
            self.train_metrics['learning_rates'].append(self.optimizer.param_groups[0]['lr'])
        
        # Occasionally train the decision-tree strategy on a small slice of data.
        # NOTE(review): states/actions/rewards are the loop variables leaked from
        # the batch loop above, so this trains on the LAST sampled batch only —
        # confirm that is intended.
        if self.training_step % (self.update_frequency * 2) == 0:
            for state, action, reward in zip(states[:10], actions[:10], rewards[:10]):  # subset only
                self.dt_strategy.train(state, action, reward)
    
    def reset(self):
        """Reset per-game state; cumulative statistics are deliberately kept."""
        super().reset()
        # Only the current-game reward is cleared.
        self.current_reward = 0
    
    def get_stats(self) -> dict:
        """Return cumulative and recent performance statistics."""
        recent_games = 50  # window size for the recent win rate
        
        # Win rate over the last `recent_games` games (the conditional is
        # redundant — a negative slice already handles short histories).
        if len(self.win_history) > 0:
            recent_wins = self.win_history[-recent_games:] if len(self.win_history) >= recent_games else self.win_history
            recent_win_rate = sum(recent_wins) / len(recent_wins)
        else:
            recent_win_rate = 0
            
        return {
            'total_reward': self.total_reward,
            'current_reward': self.current_reward,
            'total_profit': self.total_profit,
            'games_played': self.games_played,
            'win_rate': sum(self.win_history) / max(1, len(self.win_history)),
            'recent_win_rate': recent_win_rate,
            'average_profit': self.total_profit / max(1, self.games_played)
        }
    
    def save(self, path: str):
        """Save both networks plus optimiser/scheduler/scaler state and training metadata."""
        torch.save({
            'policy_net_state_dict': self.policy_net.state_dict(),
            'target_net_state_dict': self.target_net.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'scaler_state_dict': self.scaler.state_dict(),
            'epsilon': self.epsilon,
            'training_step': self.training_step,
            'train_metrics': self.train_metrics
        }, path)
    
    def load(self, path: str):
        """Load a checkpoint written by save(); silently no-ops if `path` is missing."""
        if os.path.exists(path):
            checkpoint = torch.load(path, map_location=device)
            self.policy_net.load_state_dict(checkpoint['policy_net_state_dict'])
            self.target_net.load_state_dict(checkpoint['target_net_state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
            self.scaler.load_state_dict(checkpoint['scaler_state_dict'])
            self.epsilon = checkpoint['epsilon']
            self.training_step = checkpoint['training_step']
            self.train_metrics = checkpoint['train_metrics']
            
            # Ensure both networks live on the active device.
            self.policy_net = self.policy_net.to(device)
            self.target_net = self.target_net.to(device)

    def _explore(self, state: dict) -> Tuple[str, int]:
        """Pick a random action, downgrading raise/call when chips are insufficient."""
        actions = ['fold', 'check', 'call', 'raise']
        action_type = random.choice(actions)
        
        if action_type == 'raise':
            # Choose a raise amount between the minimum raise and our whole stack.
            min_raise = state.get('min_raise', self.min_raise)
            max_raise = min(self.chips, state.get('max_raise', self.chips))
            if max_raise <= min_raise:  # not enough chips to raise
                action_type = 'call'
                bet_amount = state.get('current_bet', 0) - self.current_bet
            else:
                bet_amount = random.randint(min_raise, max_raise)
        elif action_type == 'call':
            # Amount needed to call.
            bet_amount = state.get('current_bet', 0) - self.current_bet
            if bet_amount > self.chips:  # not enough chips to call
                action_type = 'fold'
                bet_amount = 0
        else:
            bet_amount = 0
            
        return action_type, bet_amount

def _get_hand_rank(hand_type: str) -> int:
    """获取牌型等级"""
    ranks = {
        "皇家同花顺": 9,
        "同花顺": 8,
        "四条": 7,
        "葫芦": 6,
        "同花": 5,
        "顺子": 4,
        "三条": 3,
        "两对": 2,
        "一对": 1,
        "高牌": 0
    }
    return ranks[hand_type]

def _get_current_hand_rank(hole_cards: List[Card], 
                          community_cards: List[Card]) -> int:
    """获取当前牌型等级"""
    if not community_cards:
        return 0
    hand = evaluate_hand(hole_cards, community_cards)
    return _get_hand_rank(hand.hand_type) 