"""
模型推理接口用于决策
"""
import json
import logging
import os
import time
from typing import Dict, List, Optional, Union, Tuple

import numpy as np
import torch

from sichuanmajiang.model.mahjong_model import create_mahjong_model
from sichuanmajiang.data.preprocessor import DataPreprocessor
from sichuanmajiang.engine.utils import extract_features


class MahjongInference:
    """
    Inference interface for the mahjong model.

    Builds the network, loads checkpoint weights, and exposes single,
    ranked, and batched action predictions over plain game-state dicts.
    """
    def __init__(self, 
                 model_path: str, 
                 model_type: str = 'policy_network',
                 model_config: Optional[Dict] = None,
                 device: Optional[str] = None):
        """
        Initialize the inference interface.

        Args:
            model_path: Path to the saved model checkpoint.
            model_type: Architecture name forwarded to the model factory.
            model_config: Extra keyword arguments for the model factory.
            device: Torch device string; auto-detects CUDA when None.
        """
        # Set up logging first so every later step can report progress.
        self.logger = self._setup_logging()
        self.logger.info(f"初始化推理接口，模型路径: {model_path}")
        
        # Device selection: prefer CUDA when available.
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = device
        self.logger.info(f"使用设备: {self.device}")
        
        # Model configuration (an empty dict means factory defaults).
        if model_config is None:
            model_config = {}
        self.model_config = model_config
        self.model_type = model_type
        
        # Build the network and load its weights.
        self.model = self._build_and_load_model(model_path, model_type, model_config)
        
        # Feature preprocessor (currently not applied in preprocess_input).
        self.preprocessor = DataPreprocessor()
        
        # Maps model output indices to concrete action names.
        self.action_map = self._setup_action_map()
    
    def _setup_logging(self) -> logging.Logger:
        """
        Configure and return the interface logger.

        Returns:
            A logger named 'MahjongInference' with a single console handler.
        """
        logger = logging.getLogger('MahjongInference')
        logger.setLevel(logging.INFO)
        
        # Attach a console handler only if none exists yet, to avoid
        # duplicate log lines when several interfaces are created.
        if not logger.handlers:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.INFO)
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            console_handler.setFormatter(formatter)
            logger.addHandler(console_handler)
        
        return logger
    
    def _build_and_load_model(self, model_path: str, model_type: str, model_config: Dict) -> torch.nn.Module:
        """
        Build the model, load its weights, and prepare it for inference.

        Args:
            model_path: Path to the checkpoint file.
            model_type: Architecture name forwarded to the model factory.
            model_config: Extra keyword arguments for the model factory.

        Returns:
            The model in eval mode on the configured device.
        """
        self.logger.info(f"构建模型: {model_type}")
        
        # Create the network via the project factory.
        model = create_mahjong_model(
            model_type=model_type,
            **model_config
        )
        
        # Move to the target device before loading weights mapped there.
        model.to(self.device)
        
        # Load checkpoint weights.
        self._load_model_weights(model, model_path)
        
        # Inference only: freeze dropout / batch-norm behavior.
        model.eval()
        
        return model
    
    def _load_model_weights(self, model: torch.nn.Module, model_path: str) -> None:
        """
        Load checkpoint weights into the model.

        Supports both full training checkpoints (with a 'model_state_dict'
        key) and bare state dicts.

        Args:
            model: Model instance to populate.
            model_path: Path to the checkpoint file.

        Raises:
            Exception: Re-raises whatever torch.load / load_state_dict raised,
                after logging the failure.
        """
        self.logger.info(f"加载模型权重: {model_path}")
        
        try:
            # NOTE(review): torch.load unpickles arbitrary objects — only load
            # checkpoints from trusted sources (consider weights_only=True on
            # recent torch versions).
            checkpoint = torch.load(model_path, map_location=self.device)
            
            # Training checkpoints wrap the weights under 'model_state_dict'.
            if 'model_state_dict' in checkpoint:
                model.load_state_dict(checkpoint['model_state_dict'])
            else:
                # A bare state dict was saved directly.
                model.load_state_dict(checkpoint)
            
            self.logger.info("模型权重加载成功")
        except Exception as e:
            self.logger.error(f"模型权重加载失败: {str(e)}")
            raise
    
    def _setup_action_map(self) -> List[str]:
        """
        Build the mapping from model output index to action name.

        Returns:
            Action names ordered by model output index.
        """
        # NOTE(review): Sichuan mahjong rules normally disallow 'chi'; it is
        # kept here to match the model's output head — confirm with the rules.
        return [
            'pass',      # skip / take no action
            'play',      # discard a tile
            'chi',       # chow (claim for a sequence)
            'peng',      # pong (claim for a triplet)
            'gang',      # kong (claim for a quad)
            'hu'         # declare a win
        ]
    
    def preprocess_input(self, game_state: Dict) -> torch.Tensor:
        """
        Convert a game-state dict into a model-ready feature tensor.

        Args:
            game_state: Game state dictionary.

        Returns:
            Float32 tensor on self.device with a leading batch dimension.
        """
        # Extract a flat feature vector from the game state.
        features = extract_features(game_state)
        
        features_array = np.array(features, dtype=np.float32)
        
        # The DataPreprocessor is intentionally not applied here yet.
        # features_array = self.preprocessor.preprocess(features_array)
        
        features_tensor = torch.tensor(features_array).to(self.device)
        
        # Ensure a batch dimension for single samples.
        if len(features_tensor.shape) == 1:
            features_tensor = features_tensor.unsqueeze(0)
        
        return features_tensor
    
    def predict(self, game_state: Dict, return_probs: bool = False) -> Union[int, Tuple[int, np.ndarray]]:
        """
        Predict the best action for a game state.

        Args:
            game_state: Game state dictionary.
            return_probs: Also return the full probability distribution.

        Returns:
            The predicted action index, optionally paired with the softmax
            probabilities as a 1-D numpy array.
        """
        features = self.preprocess_input(game_state)
        
        with torch.no_grad():
            # Actor-critic models return (policy_logits, value); other model
            # types return the logits directly.
            if self.model_type == 'actor_critic':
                outputs, _ = self.model(features)
            else:
                outputs = self.model(features)
            
            # Softmax over the action dimension. (The previous
            # hasattr(torch.nn.functional, 'softmax') guard was dead code:
            # softmax always exists in torch.)
            probs = torch.nn.functional.softmax(outputs, dim=1)
            
            # Greedy action: index of the highest logit.
            _, predicted = torch.max(outputs, 1)
            
            predicted_action = predicted.item()
            probs_array = probs.cpu().numpy()[0]
        
        if return_probs:
            return predicted_action, probs_array
        return predicted_action
    
    def get_best_move(self, game_state: Dict) -> Dict[str, Union[str, float]]:
        """
        Return the single best action together with its probabilities.

        Args:
            game_state: Game state dictionary.

        Returns:
            Dict with the best action's name, index and probability, the full
            per-action probability map, and a 'confidence' alias.
        """
        action_idx, probs = self.predict(game_state, return_probs=True)
        
        # Indices beyond the known action map get a placeholder name.
        action_name = self.action_map[action_idx] if action_idx < len(self.action_map) else f'unknown_{action_idx}'
        
        # Probability for every known action.
        action_probs = {self.action_map[i]: float(probs[i]) for i in range(len(self.action_map))}
        
        return {
            'best_action': action_name,
            'best_action_index': action_idx,
            'best_action_prob': float(probs[action_idx]),
            'all_actions_prob': action_probs,
            'confidence': float(probs[action_idx])
        }
    
    def get_ranked_moves(self, game_state: Dict, top_k: int = 3) -> List[Dict[str, Union[str, float]]]:
        """
        Return the top-k actions ranked by predicted probability.

        Args:
            game_state: Game state dictionary.
            top_k: Number of actions to return.

        Returns:
            Move dicts ordered best-first, each with the action name, index,
            probability, and 1-based rank.
        """
        _, probs = self.predict(game_state, return_probs=True)
        
        # Indices of the top-k probabilities, highest first.
        sorted_indices = np.argsort(probs)[::-1][:top_k]
        
        # Rank is simply the position in the sorted order; argsort indices are
        # unique, so the original per-move np.where lookup was redundant.
        ranked_moves = []
        for rank, idx in enumerate(sorted_indices, start=1):
            action_name = self.action_map[idx] if idx < len(self.action_map) else f'unknown_{idx}'
            ranked_moves.append({
                'action': action_name,
                'action_index': int(idx),
                'probability': float(probs[idx]),
                'rank': rank
            })
        
        return ranked_moves
    
    def batch_predict(self, game_states: List[Dict], batch_size: int = 32) -> List[int]:
        """
        Predict actions for many game states in mini-batches.

        Args:
            game_states: List of game state dictionaries.
            batch_size: Number of states per forward pass.

        Returns:
            Predicted action indices, in input order.
        """
        results = []
        
        for i in range(0, len(game_states), batch_size):
            batch_states = game_states[i:i+batch_size]
            
            # Preprocess each state (each tensor already carries a batch dim).
            batch_features = [self.preprocess_input(state) for state in batch_states]
            
            # Stack into a single batch tensor.
            batch_tensor = torch.cat(batch_features)
            
            with torch.no_grad():
                if self.model_type == 'actor_critic':
                    outputs, _ = self.model(batch_tensor)
                else:
                    outputs = self.model(batch_tensor)
                
                # Greedy action per sample.
                _, predicted = torch.max(outputs, 1)
                results.extend(predicted.tolist())
        
        return results
    
    def save_interface_config(self, output_path: str) -> None:
        """
        Persist the interface configuration (not the weights) as JSON.

        Args:
            output_path: Destination file path.
        """
        config = {
            'model_type': self.model_type,
            'model_config': self.model_config,
            'action_map': self.action_map
        }
        
        # Only create the parent directory when the path has one:
        # os.makedirs('') raises FileNotFoundError for bare filenames.
        directory = os.path.dirname(output_path)
        if directory:
            os.makedirs(directory, exist_ok=True)
        
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(config, f, ensure_ascii=False, indent=2)
        
        self.logger.info(f"接口配置保存到: {output_path}")
    
    def load_interface_config(self, config_path: str) -> None:
        """
        Load a previously saved interface configuration.

        Missing keys keep their current values.

        Args:
            config_path: Path to the JSON config file.
        """
        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)
        
        self.model_type = config.get('model_type', self.model_type)
        self.model_config = config.get('model_config', self.model_config)
        self.action_map = config.get('action_map', self.action_map)
        
        self.logger.info(f"接口配置从 {config_path} 加载成功")


class MahjongDecisionMaker:
    """
    Mahjong decision maker.

    Combines model inference with an optional rule engine so the final
    recommendation is both model-preferred and rule-legal.
    """
    def __init__(self, inference_interface: MahjongInference, rule_engine=None):
        """
        Initialize the decision maker.

        Args:
            inference_interface: Inference interface instance.
            rule_engine: Optional rule engine used to validate moves.
        """
        self.inference = inference_interface
        self.rule_engine = rule_engine
        self.logger = self.inference.logger
    
    def make_decision(self, game_state: Dict, use_rules: bool = True) -> Dict[str, Union[str, float, int, List]]:
        """
        Produce a final decision for the given game state.

        Args:
            game_state: Game state dictionary.
            use_rules: Filter model suggestions through the rule engine
                (only effective when a rule engine is configured).

        Returns:
            Decision dict with the chosen action, its probability, the raw
            model recommendation, and a 'decision_source' tag.
        """
        # Model's single best action (with probabilities).
        model_decision = self.inference.get_best_move(game_state)
        
        # Top candidate actions, best first.
        ranked_moves = self.inference.get_ranked_moves(game_state, top_k=5)
        
        if use_rules and self.rule_engine:
            # Keep only the moves the rule engine accepts.
            valid_moves = self._filter_valid_moves(game_state, ranked_moves)
            
            if valid_moves:
                # Highest-ranked legal move wins.
                final_decision = valid_moves[0]
                decision_info = {
                    'final_action': final_decision['action'],
                    'final_action_index': final_decision['action_index'],
                    'probability': final_decision['probability'],
                    'model_recommendation': model_decision,
                    'valid_moves': valid_moves,
                    'all_ranked_moves': ranked_moves,
                    'decision_source': 'rule_filtered_model'
                }
            else:
                # No legal move found: fall back to passing.
                decision_info = {
                    'final_action': 'pass',
                    'final_action_index': 0,
                    'probability': 1.0,
                    'model_recommendation': model_decision,
                    'valid_moves': [],
                    'all_ranked_moves': ranked_moves,
                    'decision_source': 'default_pass'
                }
        else:
            # No rule filtering: take the model's top-ranked move directly.
            final_decision = ranked_moves[0]
            decision_info = {
                'final_action': final_decision['action'],
                'final_action_index': final_decision['action_index'],
                'probability': final_decision['probability'],
                'model_recommendation': model_decision,
                'all_ranked_moves': ranked_moves,
                'decision_source': 'model_only'
            }
        
        return decision_info
    
    def _filter_valid_moves(self, game_state: Dict, ranked_moves: List[Dict]) -> List[Dict]:
        """
        Return the subset of ranked moves the rule engine considers legal.

        Args:
            game_state: Game state dictionary.
            ranked_moves: Candidate moves, best first.
        """
        current_player = game_state.get('current_player', 0)
        player_hands = game_state.get('player_hands', [[]])
        # Robustness: guard against an out-of-range player index (the
        # original code raised IndexError on malformed states).
        if 0 <= current_player < len(player_hands):
            player_hand = player_hands[current_player]
        else:
            player_hand = []
        last_played_card = game_state.get('last_played_card')
        
        return [
            move for move in ranked_moves
            if self._is_move_valid(move['action'], player_hand, last_played_card)
        ]
    
    def _is_move_valid(self, action: str, player_hand: List, last_played_card) -> bool:
        """
        Check a single action's legality against the rule engine.
        """
        # Discarding and passing are always allowed (discard assumes the
        # tile is actually in hand).
        if action in ('play', 'pass'):
            return True
        # All claim actions require a tile to react to.
        if not last_played_card:
            return False
        if action == 'chi':
            return self.rule_engine.can_chi(player_hand, last_played_card)
        if action == 'peng':
            return self.rule_engine.can_peng(player_hand, last_played_card)
        if action == 'gang':
            return self.rule_engine.can_gang(player_hand, last_played_card)
        if action == 'hu':
            return self.rule_engine.can_hu(player_hand, last_played_card)
        # Unknown actions are treated as illegal.
        return False
    
    def make_detailed_decision(self, game_state: Dict) -> Dict:
        """
        Produce a decision enriched with timing, position, and confidence info.

        Args:
            game_state: Game state dictionary.

        Returns:
            The basic decision plus timestamp, round, player position,
            confidence level, and (when a rule engine exists) strategy advice.
        """
        basic_decision = self.make_decision(game_state)
        
        detailed_decision = {
            **basic_decision,
            'timestamp': game_state.get('timestamp', int(time.time())),
            'game_round': game_state.get('game_round', 0),
            'player_position': game_state.get('current_player', 0),
            'confidence_level': self._calculate_confidence_level(basic_decision['probability'])
        }
        
        if self.rule_engine:
            detailed_decision['strategy_advice'] = self._generate_strategy_advice(
                game_state, basic_decision['final_action']
            )
        
        return detailed_decision
    
    def _calculate_confidence_level(self, probability: float) -> str:
        """
        Map a probability to a coarse confidence label.

        Args:
            probability: Probability in [0, 1].

        Returns:
            One of 'high', 'medium_high', 'medium', 'medium_low', 'low'.
        """
        if probability >= 0.8:
            return 'high'
        elif probability >= 0.6:
            return 'medium_high'
        elif probability >= 0.4:
            return 'medium'
        elif probability >= 0.2:
            return 'medium_low'
        else:
            return 'low'
    
    def _generate_strategy_advice(self, game_state: Dict, action: str) -> str:
        """
        Return a short human-readable strategy hint for the chosen action.

        Args:
            game_state: Game state dictionary (currently unused).
            action: The chosen action name.
        """
        # Table dispatch; unknown actions fall back to the 'pass' advice.
        advice = {
            'hu': '和牌是最佳选择',
            'gang': '杠牌可以增加得分机会',
            'peng': '碰牌可以加速手牌成型',
            'chi': '吃牌可以调整手牌结构',
            'play': '打出一张牌'
        }
        return advice.get(action, '选择过牌')


def create_inference_interface(model_path: str, 
                               model_type: str = 'policy_network',
                               model_config: Optional[Dict] = None,
                               device: Optional[str] = None) -> MahjongInference:
    """
    Convenience factory for a MahjongInference instance.

    Args:
        model_path: Path to the model checkpoint.
        model_type: Model architecture identifier.
        model_config: Optional model factory configuration.
        device: Optional torch device string.

    Returns:
        A ready-to-use inference interface.
    """
    return MahjongInference(model_path, model_type, model_config, device)


def create_decision_maker(model_path: str, 
                          model_type: str = 'policy_network',
                          model_config: Optional[Dict] = None,
                          device: Optional[str] = None,
                          rule_engine=None) -> MahjongDecisionMaker:
    """
    Convenience factory for a fully wired MahjongDecisionMaker.

    Args:
        model_path: Path to the model checkpoint.
        model_type: Model architecture identifier.
        model_config: Optional model factory configuration.
        device: Optional torch device string.
        rule_engine: Optional rule engine for move validation.

    Returns:
        A decision maker backed by a freshly built inference interface.
    """
    interface = create_inference_interface(model_path, model_type, model_config, device)
    return MahjongDecisionMaker(interface, rule_engine)


# Import required modules (time is used by MahjongDecisionMaker and main)
import time


def main() -> None:
    """
    Command-line entry point: runs a quick smoke test of the inference
    interface against a hard-coded sample game state.
    """
    import argparse
    
    # Command-line options.
    cli = argparse.ArgumentParser(description='麻将推理接口')
    cli.add_argument('--model', type=str, help='模型路径')
    cli.add_argument('--model-type', type=str, default='policy_network', help='模型类型')
    cli.add_argument('--device', type=str, help='运行设备')
    opts = cli.parse_args()
    
    # A model path is mandatory; bail out early without one.
    if not opts.model:
        print("请提供模型路径")
        return
    
    # Build the inference interface from the CLI options.
    engine = create_inference_interface(
        model_path=opts.model,
        model_type=opts.model_type,
        device=opts.device
    )
    
    # Minimal synthetic game state for the smoke test.
    sample_state = {
        'player_hands': [[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6, 7]],
        'current_player': 0,
        'last_played_card': None,
        'game_round': 0,
        'timestamp': int(time.time())
    }
    
    # Single best action.
    best = engine.get_best_move(sample_state)
    
    print("\n最佳决策:")
    print(f"动作: {best['best_action']}")
    print(f"概率: {best['best_action_prob']:.4f}")
    
    print("\n所有动作概率:")
    for name, value in best['all_actions_prob'].items():
        print(f"{name}: {value:.4f}")
    
    # Top-ranked alternatives.
    top_moves = engine.get_ranked_moves(sample_state)
    print("\n排名靠前的动作:")
    for entry in top_moves:
        print(f"{entry['rank']}. {entry['action']} (概率: {entry['probability']:.4f})")


# Script entry point: run the CLI smoke test in main().
if __name__ == '__main__':
    main()
