"""
模型服务接口
将推理功能包装成可部署的服务
"""
import os
import time
import json
import logging
import threading
from typing import Dict, List, Optional, Union, Tuple
from datetime import datetime

import torch
import numpy as np

# 导入推理接口
from sichuanmajiang.model.inference import MahjongInference, MahjongDecisionMaker
from sichuanmajiang.model.inference_simple import SimpleMahjongInference


class MahjongModelService:
    """
    Mahjong model service.

    Wraps the inference interfaces into a long-lived service object that
    handles model loading, optional result caching, rolling performance
    metrics and status reporting. Model loading, the request cache and the
    metrics are all guarded by a single reentrant lock so the service can be
    driven from multiple threads.
    """
    def __init__(self,
                 model_config: Dict,
                 service_config: Optional[Dict] = None):
        """
        Initialize the model service.

        Args:
            model_config: Model configuration (model_path, model_type,
                device, model_params).
            service_config: Optional service configuration (max_batch_size,
                caching_enabled, cache_size, use_decision_maker).
        """
        # Set up logging first so every later step can report progress.
        self.logger = self._setup_logging()
        self.logger.info("初始化麻将模型服务")

        # Raw configuration dicts as supplied by the caller.
        self.model_config = model_config
        self.service_config = service_config or {}

        # Derive concrete settings from the configuration dicts.
        self._load_config()

        # Loaded model instances, keyed by name
        # ('simple', 'full', 'decision_maker').
        self.models = {}
        # Reentrant lock protecting model loading, the cache and the metrics.
        self.lock = threading.RLock()

        # Rolling performance metrics; the measurement window is reset
        # roughly once per minute by _update_performance_metrics.
        self.performance_metrics = {
            'total_requests': 0,
            'total_time': 0,
            'avg_time': 0,
            'requests_per_minute': 0,
            'last_reset_time': time.time()
        }

        # Load the configured models.
        self._initialize_models()

    def _setup_logging(self) -> logging.Logger:
        """
        Configure and return the service logger.

        Returns:
            Logger with a single console handler. The handler is only added
            when none exists yet, so creating several service instances does
            not duplicate log output.
        """
        logger = logging.getLogger('MahjongModelService')
        logger.setLevel(logging.INFO)

        if not logger.handlers:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.INFO)
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            console_handler.setFormatter(formatter)
            logger.addHandler(console_handler)

        return logger

    def _load_config(self) -> None:
        """
        Extract concrete settings from the model and service config dicts.
        """
        # Path to the serialized model weights.
        self.model_path = self.model_config.get('model_path', '')

        # Model flavour; anything other than 'simple' loads the full model.
        self.model_type = self.model_config.get('model_type', 'policy_network')

        # Prefer CUDA when available unless the config pins a device.
        self.device = self.model_config.get('device', 'cuda' if torch.cuda.is_available() else 'cpu')

        # Service-level options.
        self.max_batch_size = self.service_config.get('max_batch_size', 32)
        self.caching_enabled = self.service_config.get('caching_enabled', True)
        self.cache_size = self.service_config.get('cache_size', 1000)

        # Always create the cache dict so attribute access is safe even when
        # caching is disabled (it is only read/written when enabled).
        self.request_cache = {}

    def _initialize_models(self) -> None:
        """
        Load the inference models described by the configuration.

        Holds the lock for the whole load so concurrent predict/reload calls
        never observe a half-initialized model table.
        """
        with self.lock:
            if self.model_type == 'simple':
                # Lightweight inference model.
                self.models['simple'] = SimpleMahjongInference(
                    model_path=self.model_path,
                    device=self.device
                )
                self.logger.info("已加载简化版推理模型")
            else:
                # Full inference model.
                self.models['full'] = MahjongInference(
                    model_path=self.model_path,
                    model_type=self.model_type,
                    model_config=self.model_config.get('model_params', {}),
                    device=self.device
                )
                self.logger.info("已加载完整版推理模型")

                # Optionally wrap the model in a rule-aware decision maker.
                if self.service_config.get('use_decision_maker', False):
                    rule_engine = None  # Hook: plug in a rule engine here.
                    self.models['decision_maker'] = MahjongDecisionMaker(
                        inference_interface=self.models['full'],
                        rule_engine=rule_engine
                    )
                    self.logger.info("已加载决策器")

    def _generate_cache_key(self, data: Dict) -> str:
        """
        Generate a deterministic cache key for a request payload.

        Uses a canonical JSON encoding (recursively sorted keys) so that two
        requests with identical content but different dict insertion order
        map to the same key. The previous str(sorted(data.items())) approach
        was order-independent only at the top level; nested dicts kept their
        insertion order and caused spurious cache misses.

        Args:
            data: Request payload.

        Returns:
            Canonical string key.
        """
        # default=str keeps non-JSON-serializable values from raising.
        return json.dumps(data, sort_keys=True, ensure_ascii=False, default=str)

    def _update_performance_metrics(self, request_time: float) -> None:
        """
        Record one request's latency and refresh the rolling metrics.

        Args:
            request_time: Wall-clock processing time of the request, seconds.
        """
        with self.lock:
            metrics = self.performance_metrics
            metrics['total_requests'] += 1
            metrics['total_time'] += request_time
            metrics['avg_time'] = metrics['total_time'] / metrics['total_requests']

            # Roughly once a minute, publish requests-per-minute and restart
            # the measurement window.
            current_time = time.time()
            elapsed_time = current_time - metrics['last_reset_time']
            if elapsed_time > 60:
                metrics['requests_per_minute'] = metrics['total_requests'] * 60 / elapsed_time
                metrics['last_reset_time'] = current_time
                metrics['total_requests'] = 0
                # Fix: total_time must be reset together with total_requests;
                # leaving it accumulated corrupted avg_time after the first
                # window reset (time from old windows divided by new counts).
                metrics['total_time'] = 0

    def predict(self, request_data: Dict) -> Dict:
        """
        Execute a prediction request.

        Args:
            request_data: Request payload; recognized keys include
                'model_name' ('simple' | 'full' | 'decision_maker'),
                'use_cache' and model-specific fields.

        Returns:
            Prediction result dict, annotated with 'timestamp', 'model_type'
            and 'processing_time'.

        Raises:
            ValueError: If 'model_name' refers to an unloaded model, or an
                unknown 'prediction_type' is requested from the full model.
        """
        start_time = time.time()

        try:
            use_cache = self.caching_enabled and request_data.get('use_cache', True)
            # Compute the key once; the original recomputed it on store.
            cache_key = self._generate_cache_key(request_data) if use_cache else None

            # Cache lookup. Reads must hold the lock: reload_model/shutdown
            # clear the dict from other threads.
            if use_cache:
                with self.lock:
                    cached = self.request_cache.get(cache_key)
                if cached is not None:
                    self.logger.debug("命中缓存")
                    self._update_performance_metrics(time.time() - start_time)
                    return cached

            # Dispatch to the requested model.
            model_name = request_data.get('model_name', 'full')
            if model_name not in self.models:
                raise ValueError(f"未知模型: {model_name}")

            if model_name == 'simple':
                result = self._predict_simple(request_data)
            elif model_name == 'decision_maker':
                result = self._predict_with_decision(request_data)
            else:
                result = self._predict_full(request_data)

            # Attach response metadata.
            result['timestamp'] = datetime.now().isoformat()
            result['model_type'] = self.model_type
            result['processing_time'] = time.time() - start_time

            # Store in the cache with simple FIFO eviction, under the lock.
            if use_cache:
                with self.lock:
                    if len(self.request_cache) >= self.cache_size:
                        # Evict the oldest entry (dicts preserve insertion
                        # order, so the first key is the oldest).
                        oldest_key = next(iter(self.request_cache))
                        del self.request_cache[oldest_key]
                    self.request_cache[cache_key] = result

            self._update_performance_metrics(time.time() - start_time)

            return result

        except Exception as e:
            self.logger.error(f"预测错误: {str(e)}")
            raise

    def _predict_simple(self, request_data: Dict) -> Dict:
        """
        Run a prediction with the simplified model.

        Args:
            request_data: Request payload with 'hand_tiles', 'visible_tiles',
                optional 'last_tile' and the 'return_probs' flag.

        Returns:
            Dict with 'action' and, when requested, per-action
            'probabilities'.
        """
        hand_tiles = request_data.get('hand_tiles', [])
        visible_tiles = request_data.get('visible_tiles', [])
        last_tile = request_data.get('last_tile')

        model = self.models['simple']

        if request_data.get('return_probs', False):
            # Return the chosen action together with the full distribution.
            action, probs = model.predict_with_probs(hand_tiles, visible_tiles, last_tile)
            return {
                'action': action,
                'probabilities': {
                    model.action_map[i]: float(probs[i])
                    for i in range(len(model.action_map))
                }
            }
        else:
            # Return only the chosen action.
            action = model.predict_action(hand_tiles, visible_tiles, last_tile)
            return {
                'action': action
            }

    def _predict_full(self, request_data: Dict) -> Dict:
        """
        Run a prediction with the full model.

        Args:
            request_data: Request payload with 'game_state' (or
                'game_states' for batch mode) and 'prediction_type'
                ('best_move' | 'ranked_moves' | 'batch').

        Returns:
            Prediction result dict.

        Raises:
            ValueError: For an unknown 'prediction_type'.
        """
        game_state = request_data.get('game_state', {})

        model = self.models['full']

        prediction_type = request_data.get('prediction_type', 'best_move')

        if prediction_type == 'best_move':
            # Single best action.
            result = model.get_best_move(game_state)
        elif prediction_type == 'ranked_moves':
            # Top-k candidate actions.
            top_k = request_data.get('top_k', 3)
            result = model.get_ranked_moves(game_state, top_k=top_k)
        elif prediction_type == 'batch':
            # Batched prediction over multiple game states.
            game_states = request_data.get('game_states', [])
            batch_size = request_data.get('batch_size', 32)
            actions = model.batch_predict(game_states, batch_size=batch_size)
            result = {'predictions': actions}
        else:
            raise ValueError(f"未知预测类型: {prediction_type}")

        return result

    def _predict_with_decision(self, request_data: Dict) -> Dict:
        """
        Run a prediction through the decision maker.

        Args:
            request_data: Request payload with 'game_state', and the
                'use_rules' / 'detailed' flags.

        Returns:
            Decision result dict.
        """
        game_state = request_data.get('game_state', {})
        use_rules = request_data.get('use_rules', True)

        decision_maker = self.models['decision_maker']

        if request_data.get('detailed', False):
            # Detailed decision with extra diagnostics.
            result = decision_maker.make_detailed_decision(game_state)
        else:
            # Basic decision.
            result = decision_maker.make_decision(game_state, use_rules=use_rules)

        return result

    def get_status(self) -> Dict:
        """
        Report the current service status.

        Returns:
            Dict with load state, device, a snapshot of the performance
            metrics and cache statistics.
        """
        # Snapshot under the lock so the reported fields are consistent
        # with each other even while other threads predict/reload.
        with self.lock:
            status = {
                'status': 'running',
                'timestamp': datetime.now().isoformat(),
                'models_loaded': list(self.models.keys()),
                'model_type': self.model_type,
                'device': self.device,
                'performance': self.performance_metrics.copy(),
                'cache_info': {
                    'enabled': self.caching_enabled,
                    'size': len(self.request_cache) if self.caching_enabled else 0,
                    'max_size': self.cache_size
                }
            }

        return status

    def reload_model(self) -> None:
        """
        Reload the models and invalidate the cache.
        """
        self.logger.info("重新加载模型")
        with self.lock:
            # Cached results may come from the old weights; drop them.
            if self.caching_enabled:
                self.request_cache.clear()

            # Re-run model initialization.
            self._initialize_models()

        self.logger.info("模型重新加载完成")

    def export_config(self, output_path: str) -> None:
        """
        Export the current configuration as JSON.

        Args:
            output_path: Destination file path; parent directories are
                created as needed.
        """
        config = {
            'model_config': self.model_config,
            'service_config': self.service_config,
            'timestamp': datetime.now().isoformat()
        }

        # Fix: only create directories when the path actually has a parent;
        # os.makedirs('') raises for bare filenames.
        directory = os.path.dirname(output_path)
        if directory:
            os.makedirs(directory, exist_ok=True)

        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(config, f, ensure_ascii=False, indent=2)

        self.logger.info(f"配置已导出到: {output_path}")

    def shutdown(self) -> None:
        """
        Shut down the service, dropping models and cached results.
        """
        self.logger.info("关闭服务")

        with self.lock:
            self.models.clear()
            if self.caching_enabled:
                self.request_cache.clear()

        self.logger.info("服务已关闭")

def create_model_service(model_config_path: str) -> MahjongModelService:
    """
    Build a model service from a JSON configuration file.

    Args:
        model_config_path: Path to the JSON configuration file.

    Returns:
        A configured MahjongModelService instance.
    """
    # Read the combined configuration file.
    with open(model_config_path, 'r', encoding='utf-8') as config_file:
        full_config = json.load(config_file)

    # Split into model and service sections and build the service.
    return MahjongModelService(
        model_config=full_config.get('model_config', {}),
        service_config=full_config.get('service_config', {})
    )


def create_service_config(output_path: str,
                         model_path: str,
                         model_type: str = 'policy_network',
                         device: Optional[str] = None,
                         **kwargs) -> None:
    """
    Create a service configuration file.

    Args:
        output_path: Destination file path; parent directories are created
            as needed.
        model_path: Path to the model weights.
        model_type: Model type identifier.
        device: Device string; defaults to 'cuda' when available, else 'cpu'.
        **kwargs: Extra options (model_params, max_batch_size,
            caching_enabled, cache_size, use_decision_maker).
    """
    # Default device selection.
    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Build the combined configuration.
    config = {
        'model_config': {
            'model_path': model_path,
            'model_type': model_type,
            'device': device,
            'model_params': kwargs.get('model_params', {})
        },
        'service_config': {
            'max_batch_size': kwargs.get('max_batch_size', 32),
            'caching_enabled': kwargs.get('caching_enabled', True),
            'cache_size': kwargs.get('cache_size', 1000),
            'use_decision_maker': kwargs.get('use_decision_maker', False)
        }
    }

    # Fix: only create directories when the path has a parent component;
    # os.makedirs('') raises for bare filenames like 'config.json'.
    directory = os.path.dirname(output_path)
    if directory:
        os.makedirs(directory, exist_ok=True)

    # Persist the configuration.
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(config, f, ensure_ascii=False, indent=2)

    print(f"服务配置已创建: {output_path}")


def main() -> None:
    """
    Command-line entry point (for ad-hoc testing).

    Supports two actions: creating a service configuration file, or starting
    the service from one and issuing a sample prediction.
    """
    import argparse

    # Command-line interface.
    arg_parser = argparse.ArgumentParser(description='麻将模型服务')
    arg_parser.add_argument('--action', type=str, choices=['create_config', 'run_service'], default='run_service', help='操作类型')
    arg_parser.add_argument('--config', type=str, help='配置文件路径')
    arg_parser.add_argument('--model', type=str, help='模型路径')
    arg_parser.add_argument('--model-type', type=str, default='policy_network', help='模型类型')
    cli_args = arg_parser.parse_args()

    if cli_args.action == 'create_config':
        # Generate a configuration file at the given (or default) location.
        config_path = cli_args.config or 'config/model_service_config.json'

        if not cli_args.model:
            print("请提供模型路径")
            return

        create_service_config(
            output_path=config_path,
            model_path=cli_args.model,
            model_type=cli_args.model_type
        )
        return

    # run_service: a configuration file is mandatory.
    if not cli_args.config:
        print("请提供配置文件路径")
        return

    # Build the service and show its status.
    service = create_model_service(cli_args.config)

    print("模型服务已启动")
    status = service.get_status()
    print(f"状态: {status['status']}")
    print(f"加载的模型: {status['models_loaded']}")
    print(f"设备: {status['device']}")

    # Smoke-test prediction with a fixed sample game state.
    sample_request = {
        'model_name': 'full',
        'prediction_type': 'best_move',
        'game_state': {
            'player_hands': [[1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6, 7]],
            'current_player': 0,
            'last_played_card': None,
            'game_round': 0
        }
    }

    try:
        outcome = service.predict(sample_request)
        print("\n预测结果:")
        print(json.dumps(outcome, ensure_ascii=False, indent=2))
    except Exception as e:
        print(f"预测测试失败: {str(e)}")


if __name__ == '__main__':
    main()
