"""
四川麻将AI模型高级使用示例

本示例展示了如何使用四川麻将AI模型进行以下高级操作：
1. 使用模型服务进行实时推理
2. 批量推理和性能优化
3. 规则引擎与AI模型的集成
4. 多模型比较
5. 模型服务的监控和管理
"""
import os
import sys
import time
import numpy as np
import torch
from concurrent.futures import ThreadPoolExecutor, as_completed

# 添加项目根目录到路径
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))

# 导入必要的模块
from sichuanmajiang.model.model_service import (
    MahjongModelService, create_model_service, create_service_config
)
from sichuanmajiang.model.inference import MahjongInference, MahjongDecisionMaker
from sichuanmajiang.model.inference_simple import (
    SimpleMahjongInference, OnlineMahjongPlayer
)
from sichuanmajiang.model.evaluator import ModelEvaluator
from sichuanmajiang.engine.game_state import MahjongGameState
from sichuanmajiang.engine.rules import MahjongRules
from sichuanmajiang.data.preprocessor import FeatureEngineer


def create_model_service_example():
    """Build and configure a demo model service.

    Uses a randomly initialized model (no checkpoint) so the example can
    run without trained weights.

    Returns:
        The configured model service instance.
    """
    print("创建模型服务示例...")

    # Prefer GPU when available; everything else is a fixed demo setting.
    runtime_device = "cuda" if torch.cuda.is_available() else "cpu"
    config = create_service_config(
        model_path=None,
        device=runtime_device,
        batch_size=32,
        max_workers=4,
        cache_size=1000,
        enable_monitoring=True,
        log_level="INFO"
    )

    service = create_model_service(config)

    # Echo the effective configuration back to the user.
    print("模型服务创建成功:")
    for label, key in (
        ("设备", "device"),
        ("批量大小", "batch_size"),
        ("最大工作线程", "max_workers"),
        ("缓存大小", "cache_size"),
        ("监控已启用", "enable_monitoring"),
    ):
        print(f"- {label}: {config[key]}")

    return service


def real_time_inference_example(service):
    """
    Real-time inference demo: single vs. batch prediction, plus cache behavior.

    Args:
        service: Model service instance exposing predict(), batch_predict()
            and get_performance_stats().
    """
    print("\n实时推理示例...")

    feature_engineer = FeatureEngineer()

    # Build a handful of fresh game states to feed the model.
    num_states = 10
    game_states = [MahjongGameState() for _ in range(num_states)]

    # --- Single-sample timing ---
    # Use perf_counter (monotonic, high resolution) instead of time.time()
    # for interval measurement.
    print("测试单个推理性能:")
    start_time = time.perf_counter()

    for i, game_state in enumerate(game_states[:3]):
        features = feature_engineer.create_game_features(game_state)
        result = service.predict(features)

        print(f"状态 {i+1}:")
        print(f"  - 推荐动作: {result['best_action']}")
        print(f"  - 动作概率: {result['action_probs'][:3]}...")
        print(f"  - 预测价值: {result['value']:.4f}")

    single_time = time.perf_counter() - start_time
    print(f"单个推理总耗时: {single_time:.4f}秒, 平均: {single_time/3:.4f}秒/次")

    # --- Batched timing over all states ---
    print("\n测试批量推理性能:")
    all_features = [feature_engineer.create_game_features(gs) for gs in game_states]

    start_time = time.perf_counter()
    service.batch_predict(all_features)
    batch_time = time.perf_counter() - start_time

    print(f"批量推理总耗时: {batch_time:.4f}秒, 平均: {batch_time/num_states:.4f}秒/次")
    if batch_time > 0:  # guard against sub-resolution timings
        print(f"批量推理加速比: {single_time/3 / (batch_time/num_states):.2f}x")

    # --- Cache behavior: repeating a query should hit the service cache ---
    print("\n测试缓存功能:")
    cached_features = all_features[0]

    # First call (may miss the cache).
    start_time = time.perf_counter()
    service.predict(cached_features)
    first_time = time.perf_counter() - start_time

    # Second, identical call (should be served from cache).
    start_time = time.perf_counter()
    service.predict(cached_features)
    cached_time = time.perf_counter() - start_time

    print(f"首次推理时间: {first_time:.6f}秒")
    print(f"缓存推理时间: {cached_time:.6f}秒")
    # A cached hit can be faster than the timer resolution; avoid
    # ZeroDivisionError in that case.
    if cached_time > 0:
        print(f"缓存加速比: {first_time/cached_time:.2f}x")

    # Service-wide performance counters.
    stats = service.get_performance_stats()
    print("\n服务性能统计:")
    print(f"- 总推理次数: {stats['total_inferences']}")
    print(f"- 缓存命中率: {stats['cache_hit_rate']:.2%}")
    print(f"- 平均推理时间: {stats['avg_inference_time']:.6f}秒")

def multi_threaded_inference_example(service):
    """
    Multi-threaded inference demo using a thread pool.

    Args:
        service: Model service instance exposing predict().
    """
    print("\n多线程推理示例...")

    feature_engineer = FeatureEngineer()

    # Build a larger batch of simulated game states.
    num_states = 50
    game_states = [MahjongGameState() for _ in range(num_states)]
    all_features = [feature_engineer.create_game_features(gs) for gs in game_states]

    print(f"使用线程池并行推理 {num_states} 个样本...")
    start_time = time.perf_counter()  # monotonic timer for benchmarking

    results = []
    with ThreadPoolExecutor(max_workers=8) as executor:
        # Map each submitted future back to its state index.
        future_to_index = {
            executor.submit(service.predict, feature): i
            for i, feature in enumerate(all_features)
        }
        for future in as_completed(future_to_index):
            index = future_to_index[future]
            try:
                results.append((index, future.result()))
            except Exception as e:
                print(f"推理失败: {e}")

    thread_time = time.perf_counter() - start_time
    print(f"多线程推理总耗时: {thread_time:.4f}秒")

    # BUG FIX: the original did `sorted(results[:3])`, which sorted only the
    # first three *completed* results (completion order is nondeterministic).
    # Sort all results by state index first, then show the first three.
    # An explicit key also avoids ever comparing the result dicts themselves.
    print("部分推理结果:")
    for index, result in sorted(results, key=lambda item: item[0])[:3]:
        print(f"状态 {index+1}: 推荐动作 {result['best_action']}, 价值 {result['value']:.4f}")


def rules_engine_integration_example():
    """Demonstrate combining the rules engine with AI-model decisions."""
    print("\n规则引擎与AI模型集成示例...")

    # Build the inference backend and the rule engine it will defer to,
    # then wrap both in a decision maker.
    compute_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    ai_inference = SimpleMahjongInference(device=compute_device)
    rule_engine = MahjongRules()
    decider = MahjongDecisionMaker(ai_inference, rules_engine=rule_engine)

    state = MahjongGameState()

    # Hand layout: one triplet, two runs, a pair, and a lone wind tile.
    hand = [
        "一万", "一万", "一万",
        "二条", "三条", "四条",
        "五万", "六万", "七万",
        "九饼", "九饼",
        "北风"
    ]
    state.player_hands[0] = hand
    state.current_tile = "北风"  # tile just drawn

    print(f"玩家手牌: {hand}")
    print(f"刚摸到的牌: {state.current_tile}")

    # Compare an AI-only decision with a rule-aware one on the same state.
    ai_decision = decider.make_decision(state, use_rules=False)
    print(f"纯AI推荐动作: {ai_decision}")
    integrated_decision = decider.make_decision(state, use_rules=True)
    print(f"集成规则的推荐动作: {integrated_decision}")

    print("\n规则优先级示例:")
    # Put the player into a "ready" (ting) state waiting on 北风,
    # then simulate drawing exactly that tile.
    state.player_status[0] = "ready"
    state.waiting_tiles[0] = ["北风"]
    state.current_tile = "北风"

    final_choice = decider.make_decision(state, use_rules=True)
    print(f"听牌状态下摸到胡牌: {state.current_tile}")
    print(f"最终决策: {final_choice}")
    print(f"决策原因: {decider.last_decision_explanation}")


def online_player_example():
    """Demonstrate a single AI-driven online player taking a turn."""
    print("\n在线玩家示例...")

    # Instantiate the player with an aggressive playing style.
    runtime_device = "cuda" if torch.cuda.is_available() else "cpu"
    bot = OnlineMahjongPlayer(
        player_id=1,
        strategy="aggressive",  # one of: aggressive, balanced, conservative
        device=runtime_device
    )
    print(f"创建在线玩家: ID={bot.player_id}, 策略={bot.strategy}")

    state = MahjongGameState()
    # A near-complete circle-suit run plus two wind pairs.
    state.player_hands[1] = [
        "一饼", "二饼", "三饼",
        "四饼", "五饼", "六饼",
        "七饼", "八饼", "九饼",
        "东风", "东风", "南风", "南风"
    ]

    print(f"玩家 {bot.player_id} 的回合")

    # Let the player pick its action for this turn.
    action = bot.make_move(state)
    print(f"玩家选择动作: {action}")

    # Move into the discard phase and choose a tile to throw.
    state.current_phase = "discard"
    tile_out = bot.choose_discard(state)
    print(f"玩家选择打出: {tile_out}")
    state.discard_tile(bot.player_id, tile_out)

    # Dump the player's accumulated statistics.
    print("玩家统计:")
    for stat_name, stat_value in bot.get_stats().items():
        print(f"- {stat_name}: {stat_value}")

def multi_model_comparison_example():
    """Build several model sizes and compare accuracy, latency and size."""
    print("\n多模型比较示例...")

    runtime_device = "cuda" if torch.cuda.is_available() else "cpu"
    evaluator = ModelEvaluator(device=runtime_device)

    # (display name, hidden width, residual-block depth) per candidate.
    size_specs = (
        ("小型模型", 64, 2),
        ("中型模型", 128, 3),
        ("大型模型", 256, 4),
    )

    candidates = []
    for label, width, depth in size_specs:
        print(f"创建 {label}...")
        net = evaluator.build_model(
            model_type="mahjong_model",
            input_dim=324,
            action_dim=6,
            hidden_dim=width,
            num_res_blocks=depth
        )
        candidates.append((label, net))

    # Random feature/action pairs stand in for a real evaluation set.
    print("\n生成测试数据...")
    sample_count = 100
    features = np.random.rand(sample_count, 324).astype(np.float32)
    actions = np.random.randint(0, 6, sample_count)

    print("\n比较模型性能:")
    report = evaluator.compare_models(
        candidates, features, actions,
        batch_size=32,
        measure_latency=True
    )

    print("\n模型比较结果:")
    for label, metrics in report.items():
        print(f"\n{label}:")
        print(f"  - 准确率: {metrics['accuracy']:.4f}")
        print(f"  - 平均推理时间: {metrics['avg_latency']*1000:.2f} ms")
        print(f"  - 参数数量: {metrics['num_params']:,}")


def service_management_example():
    """Start, probe, reconfigure, and cleanly shut down a model service."""
    print("\n模型服务管理示例...")

    config = create_service_config(
        device="cuda" if torch.cuda.is_available() else "cpu",
        enable_monitoring=True,
        log_file="./service_logs.txt"
    )

    print("启动模型服务...")
    service = MahjongModelService(config)
    service.start()

    try:
        # Fire a few predictions so the monitors have data to report.
        print("执行推理操作...")
        probe = np.random.rand(324).astype(np.float32)
        for _ in range(10):
            service.predict(probe)

        # Snapshot of the service's runtime state.
        status = service.get_status()
        run_label = '运行中' if status['running'] else '已停止'
        print("\n服务状态:")
        print(f"- 运行状态: {run_label}")
        print(f"- 启动时间: {status['uptime']:.2f}秒")
        print(f"- 队列长度: {status['queue_length']}")

        # Dump every monitoring metric the service exposes.
        print("\n监控指标:")
        for metric_name, metric_value in service.get_monitoring_metrics().items():
            print(f"- {metric_name}: {metric_value}")

        # Demonstrate live reconfiguration.
        print("\n动态调整服务配置...")
        service.update_config({
            "batch_size": 64,
            "cache_size": 2000
        })
        print("配置已更新")

    finally:
        # Always stop the service, even if a demo step raised.
        print("\n关闭模型服务...")
        service.stop()
        print("服务已关闭")


def main():
    """Run every advanced demo section in sequence, isolating failures."""
    print("四川麻将AI模型高级使用示例")
    print("=" * 60)

    def _service_demos():
        # Sections 1-3 share one service instance, so they run together.
        service = create_model_service_example()
        real_time_inference_example(service)
        multi_threaded_inference_example(service)

    # (error label, section runner) — a failure in one section is reported
    # and the remaining sections still run.
    sections = (
        ("模型服务", _service_demos),
        ("规则引擎集成", rules_engine_integration_example),
        ("在线玩家", online_player_example),
        ("多模型比较", multi_model_comparison_example),
        ("服务管理", service_management_example),
    )
    for label, run_section in sections:
        try:
            run_section()
        except Exception as e:
            print(f"{label}示例出错: {e}")

    print()
    print("高级示例运行完成!")
    print("=" * 60)
    print("提示:")
    print("- 本示例展示了四川麻将AI模型的多种高级用法")
    print("- 在实际应用中，可以根据需求选择适合的功能组合")
    print("- 对于生产环境，建议进一步完善错误处理和日志记录")
    print("- 可以根据实际硬件条件调整批处理大小和线程数量以获得最佳性能")


# Run the full demo suite only when executed as a script, not on import.
if __name__ == "__main__":
    main()
