import numpy as np
from typing import List, Dict, Tuple, Any
from poker.card import Card
from poker.evaluator import HandEvaluator
import random
from collections import defaultdict, OrderedDict, Counter
import hashlib
from concurrent.futures import ThreadPoolExecutor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import cross_val_score
import joblib
import os

class BaseStrategy:
    """Common base for poker strategies.

    Owns the shared hand evaluator and three bounded LRU caches that
    subclasses may use to memoize state, equity and action lookups.
    """

    def __init__(self):
        self.evaluator = HandEvaluator()
        # One bounded LRU cache per lookup category.
        for cache_name in ("state_cache", "equity_cache", "action_cache"):
            setattr(self, cache_name, LRUCache(1000))

    def get_action(self, state: dict, hole_cards: List[Card]) -> Tuple[str, int]:
        """Return the chosen (action_type, amount); subclasses must override."""
        raise NotImplementedError

class LRUCache:
    """A fixed-capacity least-recently-used cache backed by an OrderedDict."""

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.cache = OrderedDict()

    def get(self, key: str) -> Any:
        """Return the cached value and mark it most-recent; None on a miss."""
        try:
            value = self.cache.pop(key)
        except KeyError:
            return None
        # Re-insert so this key becomes the newest entry.
        self.cache[key] = value
        return value

    def put(self, key: str, value: Any):
        """Insert or refresh a key, evicting the oldest entry past capacity."""
        self.cache.pop(key, None)
        self.cache[key] = value
        while len(self.cache) > self.capacity:
            # Oldest entries sit at the front of the OrderedDict.
            self.cache.popitem(last=False)

class ICMModel:
    """Independent Chip Model (ICM) tournament-equity calculator.

    Converts chip stacks into prize-money equity via the Malmuth-Harville
    recursion and derives risk adjustments for call/raise decisions at
    different tournament stages (bubble, final table, heads-up).
    """

    def __init__(self):
        self.bubble_factor = 1.5          # bubble-stage equity scaling factor
        self.final_table_factor = 1.2     # final-table equity scaling factor
        self.short_stack_threshold = 0.2  # short-stack cutoff, relative to average stack

        # Memoization caches for the (exponential) equity recursion.
        self.equity_cache = LRUCache(1000)
        self.stage_cache = LRUCache(100)

    def calculate_icm_equity(self, stacks: List[int], prizes: List[int],
                           tournament_stage: str = 'normal') -> List[float]:
        """Return each player's prize-money equity for the given stacks.

        Args:
            stacks: chip counts, one per remaining player.
            prizes: remaining payouts, best finish first.
            tournament_stage: 'normal', 'bubble', 'final_table' or
                'heads_up'; non-normal stages apply a heuristic
                post-adjustment on top of the raw ICM values.

        Returns:
            A fresh list of equities, same order as ``stacks``.  For the
            'normal' stage the values sum to ``sum(prizes)``.
        """
        cache_key = f"{stacks}_{prizes}_{tournament_stage}"
        cached_result = self.equity_cache.get(cache_key)
        if cached_result is not None:
            # Return a copy so callers cannot mutate the cached entry.
            return list(cached_result)

        # Raw (unadjusted) ICM equities.  Fix: the previous recursion added
        # (1 - p_i) * sum(all remaining equities), which over-counts the
        # prize pool for three or more players.
        equities = self._raw_icm_equity(tuple(stacks), tuple(prizes))

        # Heuristic stage adjustments are applied once, at the top level.
        if tournament_stage == 'bubble':
            equities = self._adjust_bubble_equity(equities, stacks, prizes)
        elif tournament_stage == 'final_table':
            equities = self._adjust_final_table_equity(equities, stacks)
        elif tournament_stage == 'heads_up':
            equities = self._adjust_heads_up_equity(equities, stacks, prizes)

        self.equity_cache.put(cache_key, list(equities))
        return list(equities)

    def _raw_icm_equity(self, stacks: tuple, prizes: tuple) -> List[float]:
        """Malmuth-Harville ICM recursion (no stage adjustment).

        E_i = P(i first) * prizes[0]
            + sum over j != i of P(j first) * E_i(stacks without j, prizes[1:])
        """
        if not prizes:
            return [0.0] * len(stacks)
        if len(stacks) == 1:
            # The last player standing collects every remaining payout.
            return [float(sum(prizes))]

        cache_key = f"raw_{stacks}_{prizes}"
        cached = self.equity_cache.get(cache_key)
        if cached is not None:
            return list(cached)

        total_chips = sum(stacks)
        num_players = len(stacks)
        # P(finish 1st) is proportional to the current stack size.
        prob_first = [s / total_chips for s in stacks]
        equities = [p * prizes[0] for p in prob_first]

        if len(prizes) > 1:
            for j in range(num_players):
                # Condition on player j finishing first: everyone else plays
                # a sub-tournament for the remaining prizes.
                sub_stacks = stacks[:j] + stacks[j + 1:]
                sub_equity = self._raw_icm_equity(sub_stacks, prizes[1:])
                sub_idx = 0
                for k in range(num_players):
                    if k == j:
                        continue
                    equities[k] += prob_first[j] * sub_equity[sub_idx]
                    sub_idx += 1

        self.equity_cache.put(cache_key, list(equities))
        return equities

    def _adjust_bubble_equity(self, equities: List[float], stacks: List[int],
                            prizes: List[int]) -> List[float]:
        """Scale equities near the money bubble.

        Short stacks are scaled up (they should play more conservatively);
        others are deflated by bubble pressure (equity**2 / min_cash).
        """
        avg_stack = sum(stacks) / len(stacks)
        min_cash = min(prizes)

        adjusted_equities = []
        for i, equity in enumerate(equities):
            if stacks[i] < avg_stack * self.short_stack_threshold:
                # Short stacks lean conservative.
                adjusted_equities.append(equity * self.bubble_factor)
            elif equity > 0:
                # equity / (min_cash / equity) == equity**2 / min_cash
                bubble_pressure = min_cash / equity
                adjusted_equities.append(equity / bubble_pressure)
            else:
                # Fix: a zero equity previously caused a ZeroDivisionError.
                adjusted_equities.append(equity)

        return adjusted_equities

    def _adjust_final_table_equity(self, equities: List[float],
                                 stacks: List[int]) -> List[float]:
        """Boost above-average stacks at the final table."""
        avg_stack = sum(stacks) / len(stacks)
        return [equity * self.final_table_factor
                if stack > avg_stack else equity
                for equity, stack in zip(equities, stacks)]

    def _adjust_heads_up_equity(self, equities: List[float], stacks: List[int],
                              prizes: List[int]) -> List[float]:
        """Tilt heads-up equities toward the chip leader when stacks are
        lopsided (ratio above 2:1)."""
        shorter = min(stacks)
        # Fix: guard against a zero short stack (division by zero).
        stack_ratio = float('inf') if shorter == 0 else max(stacks) / shorter

        if stack_ratio > 2:  # clear chip-lead advantage
            leader_idx = stacks.index(max(stacks))
            equities = list(equities)
            equities[leader_idx] *= 1.2
            equities[1 - leader_idx] *= 0.8

        return equities

    def calculate_call_risk(self, state: dict, player_stack: int,
                          call_amount: int) -> float:
        """Estimate the ICM risk factor of calling ``call_amount``.

        Args:
            state: game state with 'players', 'player_index', 'prizes'
                and 'total_pot'.
            player_stack: the acting player's chip count (kept for
                interface compatibility).
            call_amount: chips required to call.

        Returns:
            An ICM-based risk adjustment factor; 0 means calling gains no
            equity, values near/above 1 mean the call is clearly profitable.
        """
        stacks = [p['chips'] for p in state['players']]
        player_idx = state['player_index']
        tournament_stage = self._determine_tournament_stage(state)

        current_equity = self.calculate_icm_equity(
            stacks, state['prizes'], tournament_stage
        )[player_idx]

        # Equity after calling and losing the chips.
        lose_stacks = stacks.copy()
        lose_stacks[player_idx] -= call_amount
        losing_equity = self.calculate_icm_equity(
            lose_stacks, state['prizes'], tournament_stage
        )[player_idx]

        # Equity after calling and winning the pot.
        win_stacks = stacks.copy()
        win_stacks[player_idx] += state['total_pot']
        winning_equity = self.calculate_icm_equity(
            win_stacks, state['prizes'], tournament_stage
        )[player_idx]

        equity_risked = current_equity - losing_equity
        equity_gained = winning_equity - current_equity

        if equity_gained <= 0:
            return 0

        # Fix: nothing risked previously caused a ZeroDivisionError;
        # treat a free call as the maximum (capped) ratio.
        if equity_risked <= 0:
            risk_factor = 1.0
        else:
            risk_factor = min(1.0, equity_gained / equity_risked)

        # Stage-specific scaling: tighter near the bubble and final
        # table, looser heads-up.
        if tournament_stage == 'bubble':
            risk_factor *= 0.7
        elif tournament_stage == 'final_table':
            risk_factor *= 0.9
        elif tournament_stage == 'heads_up':
            risk_factor *= 1.2

        return risk_factor

    def _determine_tournament_stage(self, state: dict) -> str:
        """Classify the tournament stage from player counts and payouts."""
        num_players = len(state['players'])
        num_active = len([p for p in state['players'] if not p['is_eliminated']])
        min_cash_position = len(state['prizes'])

        if num_players == 2:
            return 'heads_up'
        elif num_players <= 9:
            return 'final_table'
        elif num_active <= min_cash_position + 2:
            return 'bubble'
        else:
            return 'normal'

    def adjust_action_by_icm(self, action: Tuple[str, int], state: dict,
                           win_prob: float) -> Tuple[str, int]:
        """Post-process a proposed action with ICM risk considerations.

        Cash-game states (no 'prizes' key) are returned unchanged.
        """
        if 'prizes' not in state:
            return action

        action_type, amount = action
        tournament_stage = self._determine_tournament_stage(state)
        player_stack = state['players'][state['player_index']]['chips']

        if action_type in ['call', 'raise']:
            risk_factor = self.calculate_call_risk(state, player_stack, amount)
            stack_ratio = player_stack / sum(p['chips'] for p in state['players'])

            if tournament_stage == 'bubble':
                if stack_ratio < self.short_stack_threshold:
                    # Short stack on the bubble: shove only with a monster.
                    if win_prob > 0.8:
                        return 'raise', player_stack
                    elif risk_factor < 0.3:
                        return 'fold', 0
                else:
                    # Big stack on the bubble can keep applying pressure.
                    if win_prob > 0.4:
                        return action_type, amount

            elif tournament_stage == 'final_table':
                if risk_factor < 0.4 and win_prob < 0.7:
                    return 'fold', 0
                elif risk_factor < 0.6:
                    return 'call', state.get('current_bet', 0)

            elif tournament_stage == 'heads_up':
                if win_prob > 0.6:  # play more aggressively heads-up
                    # Fix: keep the amount an int as the interface promises.
                    return 'raise', int(min(amount * 1.5, player_stack))

            # Generic risk-based damping.
            if risk_factor < 0.3 and win_prob < 0.7:
                return 'fold', 0
            elif risk_factor < 0.5 and action_type == 'raise':
                return 'call', state.get('current_bet', 0)
            elif risk_factor < 0.7:
                return action_type, int(amount * risk_factor)

        return action

class MultiPlayerGameAnalyzer:
    """Analyzes multi-way game dynamics, player profiles and action selection.

    NOTE(review): several private helpers referenced below (e.g.
    _calculate_squeeze_frequency, _calculate_fold_to_3bet,
    _initialize_position_ranges, _evaluate_hand_vs_hand,
    _calculate_position_advantage, _update_player_style) are not defined
    in this view of the file — confirm they exist elsewhere before relying
    on the code paths that call them.
    """
    
    def __init__(self):
        # Default-initialized structures so stats can be updated incrementally.
        self.range_weights = defaultdict(float)
        self.player_profiles = defaultdict(lambda: {
            'style': defaultdict(float),
            'tendencies': defaultdict(float),
            'adjustments': defaultdict(float),
            'exploitability': defaultdict(float)
        })
        self.action_frequencies = defaultdict(lambda: defaultdict(int))
        self.position_stats = defaultdict(list)
        
        # Bounded caches keyed by a hash of the relevant state.
        self.dynamics_cache = LRUCache(1000)
        self.action_cache = LRUCache(1000)
        
    def analyze_table_dynamics(self, state: dict) -> Dict:
        """Compute (and cache) a bundle of table-dynamics metrics for ``state``."""
        # Serve repeated queries for an unchanged state from the cache.
        cache_key = self._get_state_hash(state)
        cached_result = self.dynamics_cache.get(cache_key)
        if cached_result is not None:
            return cached_result
            
        active_players = [p for p in state['players'] if not p['is_folded']]
        
        # Run the independent analyses concurrently.
        with ThreadPoolExecutor(max_workers=4) as executor:
            futures = {
                'stack_distribution': executor.submit(
                    self._analyze_stack_distribution, active_players
                ),
                'position_dynamics': executor.submit(
                    self._analyze_position_dynamics, state
                ),
                'action_tendencies': executor.submit(
                    self._analyze_action_tendencies, state
                ),
                'table_texture': executor.submit(
                    self._analyze_table_texture, state
                ),
                'pressure_points': executor.submit(
                    self._identify_pressure_points, state
                )
            }
            
            dynamics = {
                key: future.result() 
                for key, future in futures.items()
            }
            
        # Cache for subsequent identical states.
        self.dynamics_cache.put(cache_key, dynamics)
        return dynamics
        
    def _get_state_hash(self, state: dict) -> str:
        """Return an MD5 digest over the cache-relevant parts of the state."""
        relevant_info = {
            'players': [(p['chips'], p['is_folded']) for p in state['players']],
            'community_cards': state.get('community_cards', []),
            'total_pot': state.get('total_pot', 0),
            'current_round': state.get('current_round', '')
        }
        return hashlib.md5(str(relevant_info).encode()).hexdigest()
    
    def _analyze_stack_distribution(self, players: List[dict]) -> Dict:
        """Summarize how chips are distributed among the active players."""
        stacks = [p['chips'] for p in players]
        avg_stack = sum(stacks) / len(stacks)
        
        return {
            'stack_ratios': [s/avg_stack for s in stacks],
            'polarization': np.std(stacks) / avg_stack,
            'pressure_index': max(stacks) / min(stacks) if min(stacks) > 0 else float('inf')
        }
    
    def _analyze_position_dynamics(self, state: dict) -> Dict:
        """Aggregate per-position frequency stats (VPIP, PFR, aggression)."""
        position_data = {}
        for pos, stats in self.position_stats.items():
            if not stats:
                continue
            
            position_data[pos] = {
                'vpip': sum(1 for s in stats if s.get('voluntarily_played', False)) / len(stats),
                'pfr': sum(1 for s in stats if s.get('preflop_raise', False)) / len(stats),
                'aggression': sum(1 for s in stats if s.get('aggressive_action', False)) / len(stats),
                'squeeze_frequency': self._calculate_squeeze_frequency(stats)
            }
        
        return position_data
    
    def _analyze_action_tendencies(self, state: dict) -> Dict:
        """Derive per-player tendency metrics from recorded action frequencies."""
        tendencies = {}
        for player_id, history in self.action_frequencies.items():
            if not history:
                continue
            
            tendencies[player_id] = {
                'fold_to_3bet': self._calculate_fold_to_3bet(history),
                'fold_to_cbet': self._calculate_fold_to_cbet(history),
                'donk_betting': self._calculate_donk_frequency(history),
                'delayed_aggression': self._calculate_delayed_aggression(history)
            }
        
        return tendencies
    
    def _analyze_table_texture(self, state: dict) -> Dict:
        """Describe the board texture and pot/stack geometry of the table."""
        community_cards = state.get('community_cards', [])
        
        return {
            'board_texture': self._evaluate_board_texture(community_cards),
            'pot_size_ratio': state['total_pot'] / sum(p['chips'] for p in state['players']),
            'action_density': self._calculate_action_density(state),
            'stack_depth_impact': self._evaluate_stack_depth_impact(state)
        }
    
    def _identify_pressure_points(self, state: dict) -> List[Dict]:
        """List players vulnerable enough to be targeted with pressure."""
        pressure_points = []
        for i, player in enumerate(state['players']):
            if player['is_folded']:
                continue
                
            vulnerability = self._calculate_vulnerability(state, i)
            if vulnerability > 0.7:  # highly vulnerable player
                pressure_points.append({
                    'player_index': i,
                    'vulnerability': vulnerability,
                    'optimal_pressure': self._calculate_optimal_pressure(state, i)
                })
                
        return pressure_points
    
    def update_player_profile(self, player_id: str, action: Dict, state: dict):
        """Incorporate one observed action into the player's profile."""
        # NOTE(review): player_profiles is a defaultdict, so this check is
        # redundant — and the plain-dict values it inserts differ from the
        # defaultdict-valued factory default. Left as-is here.
        if player_id not in self.player_profiles:
            self.player_profiles[player_id] = {
                'style': {},
                'tendencies': {},
                'adjustments': {},
                'exploitability': {}
            }
        
        profile = self.player_profiles[player_id]
        
        # Update playing style.
        self._update_player_style(profile['style'], action, state)
        
        # Update behavioral tendencies.
        self._update_player_tendencies(profile['tendencies'], action, state)
        
        # Update adjustments made against this player.
        self._update_player_adjustments(profile['adjustments'], action, state)
        
        # Re-evaluate how exploitable the player currently is.
        profile['exploitability'] = self._evaluate_exploitability(profile)
    
    def get_optimal_action(self, state: dict, hole_cards: List[Card], 
                          position: int) -> Tuple[str, int]:
        """Return the optimal (action_type, amount) for a multi-way situation.

        Combines table dynamics, the hand's strength relative to the
        position's range, and multi-way factors.
        """
        # Analyze the current table dynamics.
        dynamics = self.analyze_table_dynamics(state)
        
        # Opponent range assumed for this position.
        position_range = self._get_position_range(position, state)
        
        # Hand strength relative to that range.
        range_strength = self._calculate_range_strength(hole_cards, position_range)
        
        # Multi-way pot considerations.
        multiway_factors = self._analyze_multiway_situation(state, dynamics)
        
        # Pick the final action.
        action_type, amount = self._determine_optimal_action(
            state, hole_cards, range_strength, multiway_factors
        )
        
        return action_type, amount
    
    def _get_position_range(self, position: int, state: dict) -> List[Tuple[Card, Card]]:
        """Return the assumed hand range for ``position`` (lazily initialized)."""
        if position not in self.range_weights:
            self._initialize_position_ranges()
            
        total_players = len(state['players'])
        position_type = self._get_position_type(position, total_players)
        
        return self.range_weights[position_type]
    
    def _calculate_range_strength(self, hole_cards: List[Card], 
                                position_range: List[Tuple[Card, Card]]) -> float:
        """Fraction of hands in ``position_range`` that ``hole_cards`` beats."""
        wins = 0
        total = 0
        
        for opponent_hand in position_range:
            result = self._evaluate_hand_vs_hand(hole_cards, opponent_hand)
            if result > 0:
                wins += 1
            total += 1
            
        return wins / total if total > 0 else 0
    
    def _analyze_multiway_situation(self, state: dict, 
                                  dynamics: Dict) -> Dict:
        """Summarize the multi-way pot: player count, pressure, position, action space."""
        active_players = [p for p in state['players'] if not p['is_folded']]
        
        return {
            'num_players': len(active_players),
            'stack_pressure': dynamics['stack_distribution']['pressure_index'],
            'position_advantage': self._calculate_position_advantage(state),
            'action_space': self._analyze_action_space(state, active_players)
        }
    
    def _determine_optimal_action(self, state: dict, hole_cards: List[Card],
                                range_strength: float, 
                                multiway_factors: Dict) -> Tuple[str, int]:
        """Map range strength + multi-way factors to a concrete action."""
        num_players = multiway_factors['num_players']
        position_advantage = multiway_factors['position_advantage']
        
        # Tiered policy by relative hand strength.
        if range_strength > 0.8:  # very strong hand
            if num_players > 3:  # multi-way pot: bet to protect
                return 'raise', state['total_pot'] // 2
            else:
                return 'raise', state['total_pot']  # extract maximum value
        elif range_strength > 0.6:  # strong hand
            if position_advantage > 0.7:  # clear positional edge
                return 'raise', state['total_pot'] * 2 // 3
            else:
                return 'call', state.get('current_bet', 0)
        elif range_strength > 0.4:  # medium strength
            if num_players <= 2:  # short-handed pot
                return 'raise', state['total_pot'] // 3
            else:
                return 'call', state.get('current_bet', 0)
        else:  # weak hand
            if position_advantage > 0.8 and num_players <= 2:
                return 'raise', state['min_raise']  # positional bluff attempt
            else:
                return 'fold', 0
                
class AdaptiveLearningRate:
    """Plateau-based adaptive learning-rate scheduler with linear warmup."""

    def __init__(self):
        # Base schedule bounds.
        self.base_lr = 0.001   # starting learning rate after warmup
        self.min_lr = 0.0001   # lower clamp
        self.max_lr = 0.01     # upper clamp

        # Plateau-detection parameters.
        self.patience = 5         # rounds without improvement before adjusting
        self.cooldown = 3         # rounds to wait after an adjustment
        self.factor = 0.5         # multiplicative adjustment factor
        self.warmup_steps = 1000  # linear warmup duration in steps

        # Fix: current_lr was previously set only in reset() and
        # _adjust_learning_rate(), so get_lr() past warmup raised
        # AttributeError on a fresh instance.
        self.current_lr = self.base_lr

        # Monitoring state.
        self.best_performance = float('-inf')
        self.waiting = 0           # rounds since the last best performance
        self.cooldown_counter = 0  # remaining cooldown rounds
        self.step_count = 0        # total update() calls

        # History for inspection/plotting.
        self.performance_history = []
        self.lr_history = []

    def get_lr(self, current_step: int) -> float:
        """Return the LR for ``current_step``: linear warmup, then plateau LR.

        Note: while in cooldown this call also decrements the cooldown
        counter (a side effect kept for compatibility).
        """
        # Warmup: linear ramp from min_lr to base_lr.
        if current_step < self.warmup_steps:
            return self.min_lr + (self.base_lr - self.min_lr) * (current_step / self.warmup_steps)

        if self.cooldown_counter > 0:
            self.cooldown_counter -= 1
            return max(self.min_lr, self.current_lr)

        return self.current_lr

    def update(self, performance: float) -> float:
        """Record a performance sample and return the (possibly adjusted) LR.

        Args:
            performance: scalar performance metric (higher is better).

        Returns:
            The learning rate to use for the next step.
        """
        self.step_count += 1
        self.performance_history.append(performance)

        # Track the best performance seen so far.
        if performance > self.best_performance:
            self.best_performance = performance
            self.waiting = 0
        else:
            self.waiting += 1

        # Adjust after `patience` rounds without a new best (outside cooldown).
        if self.waiting >= self.patience and self.cooldown_counter == 0:
            self._adjust_learning_rate()
            self.waiting = 0
            self.cooldown_counter = self.cooldown

        current_lr = self.get_lr(self.step_count)
        self.lr_history.append(current_lr)
        return current_lr

    def _adjust_learning_rate(self):
        """Scale current_lr based on the recent performance trend."""
        recent_performance = self.performance_history[-self.patience:]
        performance_trend = np.mean(np.diff(recent_performance))

        if performance_trend < 0:    # performance falling: decay the LR
            self.current_lr *= self.factor
        elif performance_trend > 0:  # performance rising: grow the LR
            self.current_lr = min(self.current_lr / self.factor, self.max_lr)

        self.current_lr = max(self.min_lr, self.current_lr)

    def reset(self):
        """Restore the scheduler to its initial state."""
        self.current_lr = self.base_lr
        self.best_performance = float('-inf')
        self.waiting = 0
        self.cooldown_counter = 0
        self.step_count = 0
        self.performance_history = []
        self.lr_history = []

class StrategyWeightManager:
    """Maintains and adapts the mixing weights of the component strategies."""

    def __init__(self):
        # Initial mixture weights (sum to 1).
        self.weights = {
            'gto': 0.4,      # game-theory-optimal component
            'mes': 0.3,      # maximally-exploitative component
            'icm': 0.2,      # tournament-equity component
            'multiway': 0.1  # multi-way-pot component
        }

        # Adjustment parameters.
        self.adjustment_rate = 0.1  # smoothing rate toward target weights
        self.min_weight = 0.05      # soft lower bound per weight
        self.max_weight = 0.7       # soft upper bound per weight

        # Raw per-strategy performance samples.
        self.strategy_performance = {
            'gto': [],
            'mes': [],
            'icm': [],
            'multiway': []
        }

        # Exponential moving average of performance.
        self.ema_alpha = 0.1
        self.ema_values = {k: 0.0 for k in self.weights.keys()}

    def update_performance(self, strategy_results: Dict[str, float]):
        """Record one performance sample per strategy and refresh the EMAs.

        Args:
            strategy_results: mapping of strategy name to performance score.
        """
        for strategy, performance in strategy_results.items():
            self.strategy_performance[strategy].append(performance)
            # EMA update: new = alpha * sample + (1 - alpha) * old.
            self.ema_values[strategy] = (
                self.ema_alpha * performance +
                (1 - self.ema_alpha) * self.ema_values[strategy]
            )

    def adjust_weights(self, state: dict = None):
        """Move the weights toward each strategy's recent performance share."""
        # Need at least one sample per strategy before adapting.
        if not all(len(perf) > 0 for perf in self.strategy_performance.values()):
            return

        # Score = EMA scaled by the recent trend.
        performance_scores = {}
        for strategy, ema in self.ema_values.items():
            recent_trend = self._calculate_trend(
                self.strategy_performance[strategy][-10:]
            )
            performance_scores[strategy] = ema * (1 + recent_trend)

        # Context-dependent score biases (tournament stage, player count...).
        if state:
            performance_scores = self._adjust_by_context(performance_scores, state)

        total_score = sum(performance_scores.values())
        # Fix: a zero/negative total (e.g. all performances are 0 so far)
        # previously raised ZeroDivisionError; keep the weights unchanged.
        if total_score <= 0:
            return
        new_weights = {
            strategy: score / total_score
            for strategy, score in performance_scores.items()
        }

        # Smoothly blend toward the target weights.
        for strategy in self.weights:
            delta = new_weights[strategy] - self.weights[strategy]
            self.weights[strategy] += self.adjustment_rate * delta

        # Clamp and renormalize.
        self._normalize_weights()

    def _calculate_trend(self, values: List[float]) -> float:
        """Mean first difference of ``values`` (0 for fewer than 2 samples)."""
        if len(values) < 2:
            return 0
        return np.mean(np.diff(values))

    def _adjust_by_context(self, scores: Dict[str, float],
                          state: dict) -> Dict[str, float]:
        """Bias the scores using game context (stage, player count, opponents)."""
        # Tournament-stage bias (only tournament states carry 'prizes').
        if 'prizes' in state:
            tournament_stage = self._determine_tournament_stage(state)
            if tournament_stage == 'bubble':
                scores['icm'] *= 1.5  # ICM matters most on the bubble
            elif tournament_stage == 'heads_up':
                scores['gto'] *= 1.3  # lean on GTO heads-up

        # Player-count bias.  Fix: tolerate states without a 'players' key.
        active_players = len([p for p in state.get('players', []) if not p['is_folded']])
        if active_players > 3:
            scores['multiway'] *= 1.4  # multi-way pots favor the multiway model
        elif active_players == 2:
            scores['gto'] *= 1.2  # heads-up favors GTO

        # Exploitability bias.
        if 'opponent_exploitability' in state:
            if state['opponent_exploitability'] > 0.6:
                scores['mes'] *= 1.5  # exploit highly exploitable opponents

        return scores

    def _normalize_weights(self):
        """Clamp each weight into [min_weight, max_weight], then renormalize.

        NOTE(review): renormalizing after clamping can push a weight back
        outside the clamp range — the bounds are soft, not hard.
        """
        for strategy in self.weights:
            self.weights[strategy] = max(min(
                self.weights[strategy],
                self.max_weight
            ), self.min_weight)

        total = sum(self.weights.values())
        for strategy in self.weights:
            self.weights[strategy] /= total

    def get_weights(self) -> Dict[str, float]:
        """Return a copy of the current weights (callers may mutate freely)."""
        return self.weights.copy()

    def _determine_tournament_stage(self, state: dict) -> str:
        """Classify the tournament stage from player counts and payouts.

        Mirrors ICMModel._determine_tournament_stage for consistency.
        """
        num_players = len(state['players'])
        num_active = len([p for p in state['players'] if not p['is_eliminated']])
        min_cash_position = len(state['prizes'])

        if num_players == 2:
            return 'heads_up'
        elif num_players <= 9:
            return 'final_table'
        elif num_active <= min_cash_position + 2:
            return 'bubble'
        else:
            return 'normal'

class GTOStrategy(BaseStrategy):
    """Blended strategy: mixes GTO, MES, ICM and multiway suggestions with
    adaptive weights, then applies an ICM safety adjustment.

    NOTE(review): several private helpers used below (_calculate_performance,
    _update_strategy, _get_gto_action, _get_mes_action, _get_icm_action,
    _evaluate_action_quality, _evaluate_exploitation_rate,
    _evaluate_icm_equity, _evaluate_multiway_equity) are not defined in
    this view of the file — confirm they exist elsewhere.
    """

    def __init__(self):
        super().__init__()
        self.nash_equilibrium = {}
        # CFR-style accumulators (updated elsewhere).
        self.regret_sum = defaultdict(float)
        self.strategy_sum = defaultdict(float)
        self.iterations = 1000
        self.icm = ICMModel()
        self.multi_player = MultiPlayerGameAnalyzer()
        self.adaptive_lr = AdaptiveLearningRate()
        self.weight_manager = StrategyWeightManager()

        # Pre-allocated numpy history buffers, doubled on demand.
        self.train_history = {
            'loss': np.zeros(1000),
            'reward': np.zeros(1000),
            'learning_rate': np.zeros(1000),
            'strategy_weights': np.zeros((1000, 4))
        }
        self.history_index = 0

        # Model persistence location.
        self.model_dir = 'models/gto'
        os.makedirs(self.model_dir, exist_ok=True)

    def save_models(self):
        """Persist the current strategy weights to disk."""
        joblib.dump(self.weight_manager.get_weights(),
                   os.path.join(self.model_dir, 'strategy_weights.joblib'))

    def load_models(self):
        """Load persisted strategy weights back into the weight manager.

        Fix: the loaded dict was previously stored on an unused
        ``strategy_weights`` attribute and never took effect.
        """
        path = os.path.join(self.model_dir, 'strategy_weights.joblib')
        if os.path.exists(path):
            self.weight_manager.weights = joblib.load(path)

    def train(self, state: dict, action: Tuple[str, int], reward: float):
        """Record one training step: update strategy, LR and history buffers.

        Args:
            state: current game state.
            action: the (action_type, amount) that was taken.
            reward: observed reward for that action.
        """
        performance = self._calculate_performance(state, action, reward)
        current_lr = self.adaptive_lr.update(performance)
        loss = self._update_strategy(state, action, reward, current_lr)

        # Grow the history buffers before writing past their end.
        if self.history_index >= len(self.train_history['loss']):
            self._expand_history_arrays()

        self.train_history['loss'][self.history_index] = loss
        self.train_history['reward'][self.history_index] = reward
        self.train_history['learning_rate'][self.history_index] = current_lr
        self.history_index += 1

    def _expand_history_arrays(self):
        """Double every history buffer, zero-filling the new region.

        Fix: np.resize tiles the existing data into the enlarged array
        instead of zero-filling it, which would corrupt the recorded
        history; copy into fresh zero arrays instead.
        """
        current_size = len(self.train_history['loss'])
        new_size = current_size * 2

        for key in ('loss', 'reward', 'learning_rate'):
            grown = np.zeros(new_size)
            grown[:current_size] = self.train_history[key]
            self.train_history[key] = grown

        grown_weights = np.zeros((new_size, 4))
        grown_weights[:current_size] = self.train_history['strategy_weights']
        self.train_history['strategy_weights'] = grown_weights

    def get_action(self, state: dict, hole_cards: List[Card]) -> Tuple[str, int]:
        """Return the blended (action_type, amount) for the current state."""
        # Candidate actions from each component strategy.
        gto_action = self._get_gto_action(state, hole_cards)
        mes_action = self._get_mes_action(state, hole_cards)
        icm_action = self._get_icm_action(state, hole_cards)
        multiway_action = self.multi_player.get_optimal_action(
            state, hole_cards, state.get('position', 0)
        )

        # Score each component strategy on this state.
        strategy_results = {
            'gto': self._evaluate_action_quality(state, gto_action),
            'mes': self._evaluate_exploitation_rate(state, mes_action),
            'icm': self._evaluate_icm_equity(state, icm_action),
            'multiway': self._evaluate_multiway_equity(state, multiway_action)
        }

        # Re-weight the mixture based on recent performance and context.
        self.weight_manager.update_performance(strategy_results)
        self.weight_manager.adjust_weights(state)
        weights = self.weight_manager.get_weights()

        # Record the weights as a fixed-order vector.  Fix: assigning the
        # dict itself into an ndarray row raises at runtime; also guard the
        # index against the buffer length (train() already does).
        if self.history_index >= len(self.train_history['strategy_weights']):
            self._expand_history_arrays()
        self.train_history['strategy_weights'][self.history_index] = [
            weights[k] for k in ('gto', 'mes', 'icm', 'multiway')
        ]

        # Blend the candidate actions by weight.
        final_action = self._combine_actions([
            (gto_action, weights['gto']),
            (mes_action, weights['mes']),
            (icm_action, weights['icm']),
            (multiway_action, weights['multiway'])
        ])

        # Win probability vs the remaining opponents.
        win_prob = self.evaluator.calculate_win_probability(
            hole_cards,
            state.get('community_cards', []),
            len([p for p in state['players'] if not p['is_folded']]) - 1
        )

        return self.icm.adjust_action_by_icm(final_action, state, win_prob)

    def _combine_actions(self, weighted_actions: List[Tuple[Tuple[str, int], float]]) -> Tuple[str, int]:
        """Vote the weighted candidate actions into a single action.

        The action type with the highest total weight wins; a winning
        'raise' uses the weight-averaged raise amount, every other type
        returns amount 0.
        """
        action_scores = {
            'fold': 0.0,
            'check': 0.0,
            'call': 0.0,
            'raise': 0.0
        }

        raise_amounts = []

        # Accumulate weight per action type.
        for (action_type, amount), weight in weighted_actions:
            action_scores[action_type] += weight
            if action_type == 'raise':
                raise_amounts.append((amount, weight))

        # Highest-scoring action type wins.
        best_action_type = max(action_scores.items(), key=lambda x: x[1])[0]

        if best_action_type == 'raise' and raise_amounts:
            # Weighted average of the proposed raise sizes.
            total_weight = sum(weight for _, weight in raise_amounts)
            amount = int(sum(
                amt * weight for amt, weight in raise_amounts
            ) / total_weight)
        else:
            amount = 0

        return best_action_type, amount

class MESStrategy(BaseStrategy):
    """Maximally Exploitative Strategy.

    Blends the advice of four sub-strategies (MES, GTO, ICM and multiway
    analysis) using adaptive weights from a StrategyWeightManager, then
    applies an ICM adjustment to the combined action.

    NOTE(review): several helpers this class calls (_get_mes_action,
    _get_gto_action, _get_icm_action, _combine_actions, the _evaluate_*
    methods, _calculate_exploitation_performance, _update_opponent_models)
    are not defined on this class or on BaseStrategy in the visible code —
    confirm they are provided by a mixin or elsewhere in the file.
    """

    # Fixed column order for rows of train_history['strategy_weights'].
    _WEIGHT_ORDER = ('mes', 'gto', 'icm', 'multiway')

    def __init__(self):
        super().__init__()
        # Opponent-modelling state: nested float maps / per-key lists.
        self.opponent_models = defaultdict(lambda: defaultdict(float))
        self.action_history = defaultdict(list)
        self.position_stats = defaultdict(list)
        self.hand_strength_stats = defaultdict(float)

        self.icm = ICMModel()
        self.multi_player = MultiPlayerGameAnalyzer()
        self.adaptive_lr = AdaptiveLearningRate()
        self.weight_manager = StrategyWeightManager()

        # Pre-allocated numpy buffers for training history; grown on demand
        # by _expand_history_arrays().
        self.train_history = {
            'exploitation_rate': np.zeros(1000),
            'model_accuracy': np.zeros(1000),
            'learning_rate': np.zeros(1000),
            'strategy_weights': np.zeros((1000, 4))
        }
        self.history_index = 0

        # Persistence directory for saved weights.
        self.model_dir = 'models/mes'
        os.makedirs(self.model_dir, exist_ok=True)

    def save_models(self):
        """Persist the current strategy weights to disk."""
        joblib.dump(self.weight_manager.get_weights(),
                   os.path.join(self.model_dir, 'strategy_weights.joblib'))

    def load_models(self):
        """Load previously saved model artefacts from disk if present."""
        model_files = {
            'strategy_weights.joblib': 'strategy_weights'
        }

        for filename, attr_name in model_files.items():
            path = os.path.join(self.model_dir, filename)
            if os.path.exists(path):
                # NOTE(review): this sets `self.strategy_weights`, which no
                # visible code reads — the live weights stay inside
                # self.weight_manager.  Confirm whether the loaded weights
                # should instead be pushed back into the weight manager.
                setattr(self, attr_name, joblib.load(path))

    def train(self, state: dict, action: Tuple[str, int], reward: float):
        """Record one training step: performance, learning rate, accuracy.

        Args:
            state: game-state dict for this hand.
            action: (action_type, amount) actually taken.
            reward: observed reward for the hand.
        """
        performance = self._calculate_exploitation_performance(state, action, reward)

        # Adapt the learning rate to recent performance.
        current_lr = self.adaptive_lr.update(performance)

        # Update the opponent models with the adapted learning rate.
        model_accuracy = self._update_opponent_models(state, action, reward, current_lr)

        # Grow the pre-allocated history buffers if we are about to overflow.
        if self.history_index >= len(self.train_history['exploitation_rate']):
            self._expand_history_arrays()

        self.train_history['exploitation_rate'][self.history_index] = performance
        self.train_history['model_accuracy'][self.history_index] = model_accuracy
        self.train_history['learning_rate'][self.history_index] = current_lr
        self.history_index += 1

    def _expand_history_arrays(self):
        """Double the capacity of the history buffers, zero-filling new slots.

        Uses concatenation instead of np.resize because np.resize repeats the
        existing data to fill the new slots rather than zero-filling them.
        """
        current_size = len(self.train_history['exploitation_rate'])

        for key in ('exploitation_rate', 'model_accuracy', 'learning_rate'):
            self.train_history[key] = np.concatenate(
                [self.train_history[key], np.zeros(current_size)]
            )

        weights = self.train_history['strategy_weights']
        self.train_history['strategy_weights'] = np.concatenate(
            [weights, np.zeros_like(weights)], axis=0
        )

    def get_action(self, state: dict, hole_cards: List[Card]) -> Tuple[str, int]:
        """Choose an action by blending the four sub-strategies.

        Args:
            state: game-state dict (expects 'players' and, optionally,
                'community_cards' and 'position').
            hole_cards: the bot's hole cards.

        Returns:
            The combined (action_type, amount), ICM-adjusted.
        """
        # Refresh profiles for every player still in the hand.
        for player in state['players']:
            if not player['is_folded']:
                self.multi_player.update_player_profile(
                    player['id'], player.get('last_action', {}), state
                )

        # Collect each sub-strategy's suggested action.
        mes_action = self._get_mes_action(state, hole_cards)
        gto_action = self._get_gto_action(state, hole_cards)
        icm_action = self._get_icm_action(state, hole_cards)
        multiway_action = self.multi_player.get_optimal_action(
            state, hole_cards, state.get('position', 0)
        )

        # Score each sub-strategy's suggestion.
        strategy_results = {
            'mes': self._evaluate_exploitation_rate(state, mes_action),
            'gto': self._evaluate_action_quality(state, gto_action),
            'icm': self._evaluate_icm_equity(state, icm_action),
            'multiway': self._evaluate_multiway_equity(state, multiway_action)
        }

        # Update and fetch the blending weights.
        self.weight_manager.update_performance(strategy_results)
        self.weight_manager.adjust_weights(state)
        weights = self.weight_manager.get_weights()

        # Record the weight vector in a fixed column order, guarding against
        # buffer overflow.  (The original assigned the raw dict into a numpy
        # row, which raises at runtime, and never checked the bounds.)
        if self.history_index >= len(self.train_history['strategy_weights']):
            self._expand_history_arrays()
        self.train_history['strategy_weights'][self.history_index] = [
            weights[key] for key in self._WEIGHT_ORDER
        ]

        # Blend the suggestions into a single action.
        final_action = self._combine_actions([
            (mes_action, weights['mes']),
            (gto_action, weights['gto']),
            (icm_action, weights['icm']),
            (multiway_action, weights['multiway'])
        ])

        # Win probability against the remaining opponents.
        win_prob = self.evaluator.calculate_win_probability(
            hole_cards,
            state.get('community_cards', []),
            len([p for p in state['players'] if not p['is_folded']]) - 1
        )

        return self.icm.adjust_action_by_icm(final_action, state, win_prob)

class DecisionTreeStrategy(BaseStrategy):
    """基于决策树的策略"""
    
    def __init__(self):
        """Initialise models, per-hand state, training buffers and paths."""
        super().__init__()

        # Random forests instead of a single decision tree: better
        # generalisation for both the action and bet-size predictions.
        forest_params = dict(
            n_estimators=100,
            max_depth=10,
            min_samples_split=10,
            min_samples_leaf=5,
        )
        self.action_classifier = RandomForestClassifier(**forest_params)
        self.bet_regressor = RandomForestRegressor(**forest_params)
        self.scaler = StandardScaler()

        # Per-hand game state, refreshed at the top of get_action().
        self.hole_cards = None
        self.chips = 0
        self.current_bet = 0

        # Accumulated training data (features, action labels, bet amounts).
        self.X_train = []
        self.y_action = []
        self.y_bet = []

        # Minimum number of samples before the models are trained.
        self.min_samples = 100

        # Latest combined feature-importance snapshot.
        self.feature_importance = {}

        # Persistence location for the trained artefacts.
        self.model_dir = 'models/decision_tree'
        os.makedirs(self.model_dir, exist_ok=True)
        
    def save_models(self):
        """Persist classifier, regressor, scaler and feature importances.

        Mirrors the filename mapping used by load_models().
        """
        artefacts = {
            'action_classifier.joblib': self.action_classifier,
            'bet_regressor.joblib': self.bet_regressor,
            'scaler.joblib': self.scaler,
            'feature_importance.joblib': self.feature_importance,
        }
        for filename, obj in artefacts.items():
            joblib.dump(obj, os.path.join(self.model_dir, filename))
                   
    def load_models(self):
        """Restore any previously saved model artefacts from disk.

        Missing files are skipped silently so a fresh install works.
        """
        attr_for_file = {
            'action_classifier.joblib': 'action_classifier',
            'bet_regressor.joblib': 'bet_regressor',
            'scaler.joblib': 'scaler',
            'feature_importance.joblib': 'feature_importance'
        }

        for filename, attr_name in attr_for_file.items():
            path = os.path.join(self.model_dir, filename)
            if not os.path.exists(path):
                continue
            setattr(self, attr_name, joblib.load(path))
        
    def _extract_features(self, state: dict, hole_cards: List[Card]) -> np.ndarray:
        """Build the 18-dimensional feature vector for the tree models.

        Args:
            state: game-state dict; expects 'players', 'player_index' and
                'total_pot', plus optional 'community_cards' and 'min_raise'.
            hole_cards: the bot's hole cards.

        Returns:
            np.ndarray of shape (1, 18), ready for the scaler/classifier.
        """
        # Base strength/equity features (delegated to the evaluator).
        hand_strength = self.evaluator.get_hand_strength(hole_cards, state.get('community_cards', []))
        pot_equity = self.evaluator.calculate_pot_equity(
            hole_cards,
            state.get('community_cards', []),
            state['total_pot'],
            len([p for p in state['players'] if not p['is_folded']])-1
        )

        # Positional features.
        num_players = len(state['players'])
        position = state['player_index']
        position_ratio = position / num_players
        is_button = position == num_players - 1
        is_blinds = position < 2

        # Pot features.  Guard the degenerate case of an all-zero chip count,
        # which would otherwise raise ZeroDivisionError.
        total_pot = state['total_pot']
        total_chips = sum(p['chips'] for p in state['players'])
        pot_ratio = total_pot / total_chips if total_chips else 0.0

        # Betting-action features.
        num_raises = sum(1 for p in state['players'] if p.get('num_raises', 0) > 0)
        num_calls = sum(1 for p in state['players'] if p.get('num_calls', 0) > 0)
        aggression_factor = num_raises / (num_calls + 1)  # +1 avoids division by zero

        # Hand-development features.
        community_cards = state.get('community_cards', [])
        hand_potential = self._calculate_hand_potential(hole_cards, community_cards)
        drawing_odds = self._calculate_drawing_odds(hole_cards, community_cards)

        # Opponent/stack features.
        active_opponents = len([p for p in state['players'] if not p['is_folded']]) - 1
        avg_opponent_stack = sum(p['chips'] for p in state['players'] if not p['is_folded']) / (active_opponents + 1)
        stack_to_pot = self.chips / (total_pot + 1)

        # Fixed feature order — the trained models depend on it.
        features = [
            hand_strength,
            pot_equity,
            position_ratio,
            int(is_button),
            int(is_blinds),
            pot_ratio,
            num_raises / num_players,
            num_calls / num_players,
            aggression_factor,
            hand_potential['improvement'],
            hand_potential['deterioration'],
            drawing_odds,
            active_opponents / num_players,
            stack_to_pot,
            # Guard: everyone folded/all-in can make the average stack 0.
            self.chips / avg_opponent_stack if avg_opponent_stack else 0.0,
            # Guard: the pot can be 0 before any blinds are posted.
            state.get('min_raise', 0) / total_pot if total_pot else 0.0,
            len(community_cards) / 5,
            self.chips / total_chips if total_chips else 0.0
        ]

        return np.array(features).reshape(1, -1)
        
    def _calculate_hand_potential(self, hole_cards: List[Card], 
                                community_cards: List[Card]) -> Dict[str, float]:
        """Estimate how likely the hand is to improve or worsen by the river.

        Monte-Carlo rollout of the remaining board cards against the current
        hand rank.  Pre-flop (no board) a neutral 50/50 estimate is returned.
        """
        if not community_cards:
            return {'improvement': 0.5, 'deterioration': 0.5}

        baseline_rank = self._get_current_hand_rank(hole_cards, community_cards)
        n_trials = 100
        n_better = 0
        n_worse = 0

        # Cards still available to be dealt.
        seen = hole_cards + community_cards
        deck = [c for c in self.evaluator.all_cards if c not in seen]
        draws_needed = 5 - len(community_cards)

        for _ in range(n_trials):
            # Complete the board at random and re-rank the hand.
            board = community_cards + random.sample(deck, draws_needed)
            outcome = self._get_current_hand_rank(hole_cards, board)

            if outcome > baseline_rank:
                n_better += 1
            elif outcome < baseline_rank:
                n_worse += 1

        return {
            'improvement': n_better / n_trials,
            'deterioration': n_worse / n_trials
        }
        
    def _calculate_drawing_odds(self, hole_cards: List[Card], 
                              community_cards: List[Card]) -> float:
        """Score the hand's flush/straight drawing potential.

        Returns:
            1.0 for a combined flush+straight draw, 0.7 for a single draw,
            0.0 otherwise (or once the board is already complete).
        """
        if len(community_cards) >= 5:
            return 0.0

        cards = hole_cards + community_cards
        suits = [card.suit for card in cards]
        # De-duplicate ranks: paired cards must not count twice towards a
        # straight window (the original counted e.g. 5-5-6-7 as a draw).
        ranks = sorted({Card.RANKS.index(card.rank) for card in cards})

        # Flush draw: four or more cards of one suit.
        # NOTE(review): a made 5-card flush also lands here — confirm that
        # counting it as a "draw" is intended.
        suit_counts = Counter(suits)
        flush_draw = max(suit_counts.values()) >= 4

        # Straight draw: four distinct ranks inside a 5-rank window.
        # NOTE(review): the wheel (A-2-3-4-5) is missed if aces index high
        # in Card.RANKS — confirm the ordering.
        straight_draw = any(
            ranks[i + 3] - ranks[i] <= 4 for i in range(len(ranks) - 3)
        )

        if flush_draw and straight_draw:
            return 1.0  # combo draw
        if flush_draw or straight_draw:
            return 0.7  # single draw
        return 0.0
        
    def train(self, state: dict, action: Tuple[str, int], reward: float):
        """Accumulate one training example and (re)fit once enough exist.

        Args:
            state: game-state dict used for feature extraction.
            action: (action_type, bet_amount) actually taken.
            reward: observed reward (kept for interface compatibility; not
                used by the tree models directly).
        """
        # No features can be extracted before the first get_action() call.
        if self.hole_cards is None:
            return

        action_type, bet_amount = action
        action_labels = ['fold', 'check', 'call', 'raise']
        if action_type not in action_labels:
            # Skip action types outside the model's label set instead of
            # raising ValueError mid-game (the original used .index() directly).
            return

        features = self._extract_features(state, self.hole_cards)

        # Record the training sample.
        self.X_train.append(features[0])
        self.y_action.append(action_labels.index(action_type))
        self.y_bet.append(bet_amount)

        # Train once enough samples have accumulated.
        if len(self.X_train) >= self.min_samples:
            # NOTE(review): this refits on every hand once the threshold is
            # reached and X_train grows without bound — consider retraining
            # every N samples or capping the buffer.
            X = self.scaler.fit_transform(np.array(self.X_train))

            # Action classifier learns from all samples.
            self.action_classifier.fit(X, self.y_action)

            # Bet regressor only learns from raise actions (label 3).
            raise_mask = np.array(self.y_action) == 3
            if np.sum(raise_mask) > 0:
                self.bet_regressor.fit(X[raise_mask], np.array(self.y_bet)[raise_mask])

            self._update_feature_importance()
            
    def _update_feature_importance(self):
        """Average classifier/regressor importances into self.feature_importance."""
        feature_names = [
            'hand_strength', 'pot_equity', 'position_ratio', 'is_button',
            'is_blinds', 'pot_ratio', 'raise_ratio', 'call_ratio',
            'aggression_factor', 'improvement_potential', 'deterioration_risk',
            'drawing_odds', 'active_opponents_ratio', 'stack_to_pot',
            'relative_stack', 'min_raise_ratio', 'game_stage', 'stack_ratio'
        ]

        clf_importance = self.action_classifier.feature_importances_

        # The regressor may never have been fitted (no raise samples yet),
        # in which case it contributes zeros.
        reg_importance = (
            self.bet_regressor.feature_importances_
            if hasattr(self.bet_regressor, 'feature_importances_')
            else np.zeros_like(clf_importance)
        )

        averaged = (clf_importance + reg_importance) / 2
        self.feature_importance = dict(zip(feature_names, averaged))
        
    def get_action(self, state: dict, hole_cards: List[Card]) -> Tuple[str, int]:
        """Decide an action for the current state.

        Falls back to a rule-based baseline until enough training samples
        have been collected; afterwards uses the random-forest models.

        Args:
            state: game-state dict.
            hole_cards: the bot's hole cards.

        Returns:
            (action_type, bet_amount) tuple.
        """
        # Refresh the per-hand state used by feature extraction and train().
        self.hole_cards = hole_cards
        player_state = state['players'][state['player_index']]
        self.chips = player_state['chips']
        self.current_bet = player_state.get('current_bet', 0)

        features = self._extract_features(state, hole_cards)

        # Not enough data yet: play the rule-based baseline.
        if len(self.X_train) < self.min_samples:
            return self._base_strategy(state, hole_cards)

        # Scale features and get the classifier's probability distribution.
        features_scaled = self.scaler.transform(features)
        action_probs = self.action_classifier.predict_proba(features_scaled)[0]

        # Pick the action after context adjustments.
        action_idx = self._select_action_with_context(action_probs, state)
        action_type = ['fold', 'check', 'call', 'raise'][action_idx]

        if action_type == 'raise':
            # Regressor predicts a base raise size, then clamp to the scene.
            base_amount = int(self.bet_regressor.predict(features_scaled)[0])
            bet_amount = self._adjust_bet_amount(base_amount, state)
        elif action_type == 'call':
            # Amount needed to match the highest bet on the table.  Use .get
            # for consistency with how our own 'current_bet' is read above
            # (the original indexed the key directly and could KeyError).
            current_max_bet = max(p.get('current_bet', 0) for p in state['players'])
            bet_amount = current_max_bet - self.current_bet
        else:
            bet_amount = 0

        return action_type, bet_amount

    def _select_action_with_context(self, action_probs: np.ndarray, 
                                  state: dict) -> int:
        """Adjust the classifier's probabilities using game context.

        Returns the selected action label (0=fold, 1=check, 2=call, 3=raise).
        The probability columns are mapped through the classifier's
        ``classes_`` so the result is correct even when some action labels
        were absent from the training data — the original indexed the
        columns as if all four classes were always present, which silently
        picks the wrong action when they are not.
        """
        probs = np.asarray(action_probs, dtype=float).copy()

        # Labels corresponding to each probability column.
        classes = list(getattr(self.action_classifier, 'classes_',
                               range(len(probs))))

        # Bias towards the rule-based baseline's suggestion.
        base_action = self._base_strategy(state, self.hole_cards)[0]
        base_label = ['fold', 'check', 'call', 'raise'].index(base_action)
        if base_label in classes:
            probs[classes.index(base_label)] *= 1.2

        # Big pots favour continuing: boost call (2) and raise (3).
        if state.get('total_pot', 0) > sum(p['chips'] for p in state['players']) * 0.5:
            for col, label in enumerate(classes):
                if label >= 2:
                    probs[col] *= 1.3

        # Normalise and return the label of the most probable column.
        probs /= probs.sum()
        return int(classes[int(np.argmax(probs))])
        
    def _adjust_bet_amount(self, base_amount: int, state: dict) -> int:
        """Clamp a predicted raise size to sensible and legal bounds.

        The amount is first nudged into [0.5*pot, 2*pot] for sizing
        discipline, then clamped to [min_raise, max_raise] so it can never
        exceed the player's stack.  (The original applied the pot-based
        bounds *after* the legal clamp, which could push the bet above
        max_raise / the remaining chips.)
        """
        min_raise = state.get('min_raise', 0)
        max_raise = min(self.chips, state.get('max_raise', self.chips))

        # Pot-relative sizing: at least half pot, at most two pots.
        pot_size = state.get('total_pot', 0)
        amount = base_amount
        if amount < pot_size * 0.5:
            amount = int(pot_size * 0.5)
        elif amount > pot_size * 2:
            amount = int(pot_size * 2)

        # Legal bounds always win over pot-based sizing.
        return max(min_raise, min(amount, max_raise))
        
    def _base_strategy(self, state: dict, hole_cards: List[Card]) -> Tuple[str, int]:
        """Rule-based fallback used while training data is insufficient."""
        strength = self.evaluator.get_hand_strength(
            hole_cards, state.get('community_cards', [])
        )

        if strength > 0.8:
            # Strong hand: raise the size of the pot.
            return 'raise', state['total_pot']
        if strength > 0.6:
            return 'call', state.get('current_bet', 0)
        if strength > 0.4:
            # Medium hand: bluff-raise 30% of the time, otherwise call.
            if random.random() < 0.3:
                return 'raise', state.get('min_raise', 0)
            return 'call', state.get('current_bet', 0)
        return 'fold', 0

    def _get_current_hand_rank(self, hole_cards: List[Card], community_cards: List[Card]) -> int:
        """Rank of the current best hand; 0 pre-flop (no board to evaluate)."""
        if community_cards:
            return self.evaluator.evaluate_hand(hole_cards, community_cards)
        return 0