# 1. 模型架构优化
#(a) 模型剪枝和压缩
'''
结构化剪枝：减少30%计算量，模型大小降低25%
关键指标：FLOPs从12B降至8.4B
'''
import torch
from torch.nn.utils import prune

def structured_model_pruning(model, pruning_ratio=0.3):
    """Apply L2 structured pruning to every Linear layer and bake it in.

    For each ``torch.nn.Linear`` sub-module, the ``pruning_ratio`` fraction
    of output channels (rows of the weight matrix, ``dim=0``) with the
    smallest L2 norm is zeroed, then the pruning re-parametrization is
    removed so the masked weights become permanent.

    Args:
        model: torch.nn.Module whose Linear sub-modules will be pruned.
        pruning_ratio: fraction of output channels to prune per layer.

    Returns:
        The same model instance, pruned in place.
    """
    linear_layers = [
        mod for _, mod in model.named_modules()
        if isinstance(mod, torch.nn.Linear)
    ]

    # First pass: attach the pruning re-parametrization (weight_orig + mask).
    for layer in linear_layers:
        prune.ln_structured(layer, name='weight', amount=pruning_ratio, n=2, dim=0)

    # Second pass: fold each mask into the weight tensor permanently.
    for layer in linear_layers:
        prune.remove(layer, 'weight')

    return model

# (b) 知识蒸馏实现
'''
方法：使用原始BERT模型作为教师，训练6层轻量级模型
效果：模型大小减少60%，精度损失<0.3%
'''
class DistillationLoss(torch.nn.Module):
    """Combined hard/soft loss for knowledge distillation.

    total = alpha * CE(student, labels)
          + (1 - alpha) * T^2 * KL(log_softmax(student/T) || softmax(teacher/T))
    """
    def __init__(self, temperature=2.0, alpha=0.5):
        super().__init__()
        self.temperature = temperature  # softening factor T
        self.alpha = alpha              # weight of the hard-label term
        self.ce_loss = torch.nn.CrossEntropyLoss()
        self.kl_loss = torch.nn.KLDivLoss(reduction='batchmean')

    def forward(self, student_logits, teacher_logits, labels):
        """Return the weighted distillation loss for one batch."""
        t = self.temperature

        # Hard-target term: standard cross-entropy against ground-truth labels.
        hard = self.ce_loss(student_logits, labels)

        # Soft-target term: KL divergence between temperature-softened
        # distributions, scaled by T^2 to keep gradient magnitudes comparable.
        log_p_student = torch.nn.functional.log_softmax(student_logits / t, dim=-1)
        p_teacher = torch.nn.functional.softmax(teacher_logits / t, dim=-1)
        soft = self.kl_loss(log_p_student, p_teacher) * (t ** 2)

        return self.alpha * hard + (1 - self.alpha) * soft
    

#(c) 量化实现
'''
INT8量化：模型大小减少75%
混合精度量化：关键层保留FP16/FP32精度
'''
import torch.quantization

def quantize_model(model, calibration_data_loader):
    """Post-training static INT8 quantization (fbgemm backend).

    Steps: set eval mode and the default fbgemm qconfig, insert observers,
    run the calibration data through the observed model to collect activation
    statistics, then convert the model to its quantized counterpart.

    Args:
        model: float torch.nn.Module to quantize.
        calibration_data_loader: iterable yielding (inputs, labels) pairs;
            only the inputs are used for calibration.

    Returns:
        The converted, quantized model.
    """
    model.eval()
    model.qconfig = torch.quantization.get_default_qconfig('fbgemm')

    # Insert observers that record activation ranges during calibration.
    observed = torch.quantization.prepare(model)

    # Calibration pass: forward calibration batches, no gradients needed.
    with torch.no_grad():
        for batch, _ in calibration_data_loader:
            observed(batch)

    # Replace observed modules with their quantized counterparts.
    return torch.quantization.convert(observed)

# 2. 编译优化阶段
# (a) TensorRT 转换与优化

import tensorrt as trt
import numpy as np
import torch
import os

def convert_to_tensorrt(torch_model, input_shape, fp16=True, int8=False, calibrator=None):
    """Export a PyTorch model to ONNX and build a TensorRT engine from it.

    Args:
        torch_model: the PyTorch model to convert.
        input_shape: full input shape including batch dim, e.g. (8, 3, 224, 224);
            used for the ONNX dummy input and as the profile's optimal shape.
        fp16: enable FP16 kernels when the platform supports them.
        int8: enable INT8 kernels when the platform supports them.
        calibrator: optional INT8 calibrator (used when int8 is enabled).

    Returns:
        The built TensorRT engine (also serialized to "model_engine.trt"),
        or None if ONNX parsing or engine building fails.
    """
    logger = trt.Logger(trt.Logger.WARNING)

    builder = trt.Builder(logger)
    # Explicit-batch network is required for ONNX-parsed models.
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    config = builder.create_builder_config()

    # Cap the builder's scratch workspace at 256 MiB.
    config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 28)

    # Enable reduced-precision kernels only where the hardware is fast.
    if fp16 and builder.platform_has_fast_fp16:
        config.set_flag(trt.BuilderFlag.FP16)

    if int8 and builder.platform_has_fast_int8:
        config.set_flag(trt.BuilderFlag.INT8)
        if calibrator:
            config.int8_calibrator = calibrator

    parser = trt.OnnxParser(network, logger)

    onnx_path = "temp_model.onnx"
    try:
        # Export to ONNX with a dynamic batch dimension.
        input_tensor = torch.randn(input_shape)
        torch.onnx.export(
            torch_model,
            input_tensor,
            onnx_path,
            input_names=['input'],
            output_names=['output'],
            dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}}
        )

        with open(onnx_path, 'rb') as model:
            if not parser.parse(model.read()):
                print("ONNX解析失败!")
                for error in range(parser.num_errors):
                    print(parser.get_error(error))
                return None
    finally:
        # BUGFIX: the intermediate ONNX file was never cleaned up.
        if os.path.exists(onnx_path):
            os.remove(onnx_path)

    # Optimization profile for the dynamic batch dim: min=1, opt=given, max=64.
    profile = builder.create_optimization_profile()
    profile.set_shape(
        "input",                       # input tensor name
        (1, *input_shape[1:]),         # minimum shape
        input_shape,                   # optimal shape
        (64, *input_shape[1:])         # maximum shape
    )
    config.add_optimization_profile(profile)

    # NOTE(review): build_engine is deprecated in TensorRT >= 8.x in favor of
    # build_serialized_network; kept to preserve the engine-object return type.
    engine = builder.build_engine(network, config)
    # BUGFIX: build_engine returns None on failure; calling .serialize() on
    # None raised AttributeError instead of reporting the build failure.
    if engine is None:
        print("TensorRT engine build failed!")
        return None

    # Serialize the engine so it can be loaded without rebuilding.
    with open("model_engine.trt", "wb") as f:
        f.write(engine.serialize())

    return engine

# (b) ONNX Runtime 优化
import onnx
import onnxruntime as ort
import numpy as np

def optimize_onnx_model(onnx_path, optimized_path):
    """Optimize an ONNX model with ONNX Runtime's transformer optimizer.

    Applies constant folding, redundant-node elimination and operator
    fusion (attention / layer-norm / gelu fusion for BERT-like graphs).

    Args:
        onnx_path: path of the input ONNX model.
        optimized_path: path where the optimized model is written.

    Returns:
        optimized_path, for convenient chaining.
    """
    # Validate the input graph before attempting any rewriting.
    model = onnx.load(onnx_path)
    onnx.checker.check_model(model)

    from onnxruntime.transformers import optimizer

    # BUGFIX: the keyword argument is `opt_level`, not `optimization_level`;
    # the old name raised TypeError. 99 = all optimizations, incl. layout.
    opt_model = optimizer.optimize_model(
        onnx_path,
        model_type='bert',
        num_heads=12,      # BERT-base attention heads
        hidden_size=768,   # BERT-base hidden size
        opt_level=99
    )

    # Persist the optimized graph.
    opt_model.save_model_to_file(optimized_path)

    return optimized_path

# 3. 推理服务优化阶段

# (a) NVIDIA Triton Inference Server 配置
def create_model_repository(model_name, tensorrt_engine_path):
    """Create a Triton model-repository entry for a TensorRT engine.

    Produces the layout Triton expects:
        ./model_repository/<model_name>/1/model.plan   (copied engine)
        ./model_repository/<model_name>/config.pbtxt   (protobuf text config)

    Args:
        model_name: name registered with Triton (directory + config name).
        tensorrt_engine_path: path to the serialized TensorRT engine file.
    """
    import os
    import re
    import shutil

    # Create the versioned model directory (version "1").
    repo_path = f"./model_repository/{model_name}"
    os.makedirs(f"{repo_path}/1", exist_ok=True)

    # Copy the TensorRT engine into place under Triton's expected file name.
    shutil.copy(tensorrt_engine_path, f"{repo_path}/1/model.plan")

    config = {
        "name": model_name,
        "platform": "tensorrt_plan",
        "max_batch_size": 64,
        "input": [
            {
                "name": "input",
                "data_type": "TYPE_FP32",
                "dims": [384]  # adjust to the actual input shape
            }
        ],
        "output": [
            {
                "name": "output",
                "data_type": "TYPE_FP32",
                "dims": [2]  # adjust to the actual output shape
            }
        ],
        "instance_group": [
            {
                "count": 2,  # number of GPU instances
                "kind": "KIND_GPU",
                "gpus": [0]  # GPU ids to use
            }
        ],
        "dynamic_batching": {
            "preferred_batch_size": [16, 32, 64],
            "max_queue_delay_microseconds": 50000  # max queue delay 50ms
        }
    }

    def fmt(value):
        """Format a scalar value for protobuf text format.

        BUGFIX: string field values must be double-quoted in pbtxt — the old
        writer emitted e.g. `name: risk_model`, which Triton's config parser
        rejects. Enum-style identifiers (TYPE_FP32, KIND_GPU, ...) must stay
        unquoted; they are distinguished by the ALL_CAPS pattern.
        """
        if isinstance(value, str) and not re.fullmatch(r"[A-Z][A-Z0-9_]*", value):
            return f'"{value}"'
        return value

    # Hand-rolled protobuf-text writer for the dict above.
    with open(f"{repo_path}/config.pbtxt", "w") as f:
        for key, value in config.items():
            if isinstance(value, dict):
                # Message-valued field, e.g. dynamic_batching { ... }
                f.write(f"{key} {{\n")
                for k, v in value.items():
                    if isinstance(v, list):
                        for item in v:
                            if isinstance(item, dict):
                                f.write(f"  {k} {{\n")
                                for ik, iv in item.items():
                                    f.write(f"    {ik}: {fmt(iv)}\n")
                                f.write(f"  }}\n")
                            else:
                                # Repeated scalar, e.g. preferred_batch_size
                                f.write(f"  {k}: {fmt(item)}\n")
                    else:
                        f.write(f"  {k}: {fmt(v)}\n")
                f.write(f"}}\n")
            elif isinstance(value, list):
                # Repeated message field, e.g. input / output / instance_group
                for item in value:
                    f.write(f"{key} {{\n")
                    for k, v in item.items():
                        f.write(f"  {k}: {fmt(v)}\n")
                    f.write(f"}}\n")
            else:
                f.write(f"{key}: {fmt(value)}\n")


# (b) 异步推理服务实现
import asyncio
import numpy as np
import tritonclient.grpc.aio as triton_grpc
from tritonclient.utils import np_to_triton_dtype

class AsyncInferenceService:
    """Async gRPC client wrapper around a Triton-hosted model.

    Usage:
        async with AsyncInferenceService() as svc:
            scores = await svc.predict(batch)
    """
    def __init__(self, url="localhost:8001", model_name="risk_model"):
        self.url = url
        self.model_name = model_name
        self.client = None  # created lazily in __aenter__

    async def __aenter__(self):
        """Connect to the Triton server and wait until it reports ready."""
        self.client = triton_grpc.InferenceServerClient(self.url)
        await self.client.is_server_ready()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Close the underlying gRPC channel."""
        await self.client.close()

    async def predict(self, input_data, request_id=None):
        """Run one async inference call and return the "output" tensor.

        Args:
            input_data: numpy array fed to the model's "input" tensor.
            request_id: optional correlation id; a random one is generated
                when omitted (or falsy).

        Returns:
            The "output" tensor of the response as a numpy array.
        """
        # Build the single named input from the numpy payload.
        infer_input = triton_grpc.InferInput(
            "input", input_data.shape, np_to_triton_dtype(input_data.dtype))
        infer_input.set_data_from_numpy(input_data)

        requested_output = triton_grpc.InferRequestedOutput("output")

        # Correlation id for tracing the request through the server.
        request_id = request_id or f"req_{np.random.randint(0, 999999)}"
        # NOTE(review): InferOptions / the infer_options kwarg may not exist
        # in all tritonclient versions — verify against the installed client.
        infer_options = triton_grpc.InferOptions(request_id)

        result = await self.client.infer(
            model_name=self.model_name,
            inputs=[infer_input],
            outputs=[requested_output],
            query_params={},
            headers={},
            request_compression_algorithm=None,
            response_compression_algorithm=None,
            infer_options=infer_options
        )

        return result.as_numpy("output")
    

# (c) 模型服务监控
import time
import numpy as np
import pandas as pd
import prometheus_client
from prometheus_client import Counter, Histogram, Gauge

class ModelMonitoring:
    """Prometheus-based monitoring for model inference.

    Exposes request counts, latency histograms and GPU utilization via an
    HTTP metrics endpoint on port 8000, and samples GPU usage from NVML in
    a background daemon thread.
    """
    def __init__(self, model_name):
        self.model_name = model_name

        # Counter: total requests, labelled by model and success/error status.
        self.inference_requests = Counter(
            'inference_requests_total',
            'Total number of inference requests',
            ['model', 'status']
        )

        # Histogram: end-to-end inference latency, buckets 5ms .. 5s.
        self.inference_latency = Histogram(
            'inference_latency_seconds',
            'Inference latency in seconds',
            ['model'],
            buckets=(0.005, 0.01, 0.025, 0.05, 0.075, 0.1,
                     0.25, 0.5, 0.75, 1.0, 2.5, 5.0)
        )

        # Gauge: instantaneous GPU utilization per device.
        self.gpu_utilization = Gauge(
            'gpu_utilization_percent',
            'GPU utilization percentage',
            ['device']
        )

        # Serve /metrics for Prometheus scraping.
        prometheus_client.start_http_server(8000)

        # Begin background GPU sampling.
        self._start_gpu_monitoring()

    def _start_gpu_monitoring(self):
        """Sample GPU utilization once per second in a daemon thread."""
        import threading

        def monitor_gpu():
            try:
                import pynvml
                pynvml.nvmlInit()
                device_count = pynvml.nvmlDeviceGetCount()

                while True:
                    for idx in range(device_count):
                        handle = pynvml.nvmlDeviceGetHandleByIndex(idx)
                        rates = pynvml.nvmlDeviceGetUtilizationRates(handle)
                        self.gpu_utilization.labels(device=f"gpu_{idx}").set(rates.gpu)
                    time.sleep(1)
            except Exception as e:
                # Best-effort: missing NVML / no GPU just disables sampling.
                print(f"GPU monitoring error: {e}")

        threading.Thread(target=monitor_gpu, daemon=True).start()

    def record_request(self, status="success"):
        """Increment the request counter for this model with the given status."""
        self.inference_requests.labels(model=self.model_name, status=status).inc()

    def time_request(self):
        """Return a context manager timing a request into the latency histogram."""
        return self.inference_latency.labels(model=self.model_name).time()

    def generate_report(self, time_window_seconds=3600):
        """Return a performance summary skeleton.

        NOTE(review): values are placeholders — real figures should be
        pulled from the Prometheus query API.
        """
        return {
            "total_requests": 0,
            "success_rate": 0.0,
            "p50_latency_ms": 0.0,
            "p95_latency_ms": 0.0,
            "p99_latency_ms": 0.0,
            "avg_gpu_util": 0.0
        }
    
# 4. 端到端服务实现
import os
import torch
import asyncio
import numpy as np
import uvicorn
from fastapi import FastAPI, HTTPException, BackgroundTasks
from pydantic import BaseModel
from typing import Dict, List, Any

# Import the previously defined classes.
# NOTE(review): AsyncInferenceService and ModelMonitoring are already defined
# earlier in this file; these imports assume they also live in separate
# modules named after each class — confirm the intended module layout,
# otherwise these lines raise ModuleNotFoundError.
from AsyncInferenceService import AsyncInferenceService
from ModelMonitoring import ModelMonitoring

app = FastAPI(title="Risk Model Inference API")
monitoring = ModelMonitoring(model_name="risk_model")

# Request queue consumed by the server-side micro-batching task.
request_queue = asyncio.Queue()

# Request schema
class RiskPredictionRequest(BaseModel):
    """Incoming prediction request: a transaction id plus its feature vector."""
    transaction_id: str
    features: List[float]

# Response schema
class RiskPredictionResponse(BaseModel):
    """Prediction result returned to the caller."""
    transaction_id: str  # echoes the request's transaction id
    risk_score: float    # model output column 1 — presumably positive-class score; verify against model
    prediction: int      # 1 when risk_score > 0.5, else 0 (set in batch_processor)
    latency_ms: float    # end-to-end service latency in milliseconds

# Batch-processing coroutine
async def batch_processor():
    """Background task that micro-batches queued requests for inference.

    Pulls requests off the module-level ``request_queue``, groups them into
    batches (up to 64 requests within a ~10 ms window), runs one batched
    inference call, and resolves each request's Future with its row of the
    result. On inference failure every request receives all-zero scores.
    """
    async with AsyncInferenceService() as inference_service:
        while True:
            batch_requests = []
            batch_size = 0
            max_batch_size = 64

            # Block until at least one request arrives.
            request = await request_queue.get()
            batch_requests.append(request)
            batch_size = 1

            # Opportunistically drain the queue for up to the batching window.
            timeout = 0.01  # 10 ms batching window
            batch_start_time = asyncio.get_event_loop().time()

            try:
                while batch_size < max_batch_size and (asyncio.get_event_loop().time() - batch_start_time) < timeout:
                    try:
                        request = request_queue.get_nowait()
                        batch_requests.append(request)
                        batch_size += 1
                    except asyncio.QueueEmpty:
                        await asyncio.sleep(0.001)  # yield briefly, wait for more requests
            except Exception as e:
                print(f"Error collecting batch: {e}")

            if not batch_requests:
                continue

            try:
                # Stack per-request feature vectors into one (batch, features) array.
                input_ids = np.vstack([req["features"] for req in batch_requests])
                transaction_ids = [req["transaction_id"] for req in batch_requests]

                # Run one batched inference call, timed for the latency histogram.
                with monitoring.time_request():
                    try:
                        results = await inference_service.predict(input_ids)
                        monitoring.record_request(status="success")
                    except Exception as e:
                        print(f"Inference error: {e}")
                        # Best-effort fallback: all-zero scores so callers still respond.
                        results = np.zeros((batch_size, 2))
                        monitoring.record_request(status="error")

                # Resolve each caller's Future with its row of the batch output.
                for req, result in zip(batch_requests, results):
                    # BUGFIX: a caller that hit its 1s timeout has already had
                    # its Future cancelled by asyncio.wait_for; calling
                    # set_result on it raises InvalidStateError and previously
                    # aborted result delivery for the rest of the batch.
                    if not req["future"].done():
                        req["future"].set_result({
                            "risk_score": float(result[1]),   # model output column 1
                            "prediction": int(result[1] > 0.5)
                        })
            except Exception as e:
                print(f"Batch processing error: {e}")
                # Propagate the failure to every still-pending request.
                for req in batch_requests:
                    if not req["future"].done():
                        req["future"].set_exception(e)

@app.on_event("startup")
async def startup_event():
    """Start the background batch-processing task at application startup."""
    # BUGFIX: keep a reference to the task. asyncio only holds weak
    # references to tasks, so a task whose result is discarded can be
    # garbage-collected mid-flight (see asyncio.create_task docs).
    app.state.batch_processor_task = asyncio.create_task(batch_processor())

@app.post("/predict", response_model=RiskPredictionResponse)
async def predict(request: RiskPredictionRequest):
    """Queue a prediction request, await its batched result, and respond.

    Raises:
        HTTPException(408) if the batch pipeline does not resolve the
        request within 1 second.
    """
    # FIX: asyncio.get_event_loop() is deprecated inside coroutines;
    # use the running loop, and create the Future through it.
    loop = asyncio.get_running_loop()
    start_time = loop.time()

    # Future resolved by batch_processor with this request's result row.
    future = loop.create_future()

    # Hand the request off to the micro-batching queue.
    await request_queue.put({
        "transaction_id": request.transaction_id,
        "features": np.array(request.features, dtype=np.float32),
        "future": future
    })

    # Wait for the batched result (wait_for cancels the future on timeout).
    try:
        result = await asyncio.wait_for(future, timeout=1.0)  # 1-second SLA
        latency_ms = (loop.time() - start_time) * 1000

        return RiskPredictionResponse(
            transaction_id=request.transaction_id,
            risk_score=result["risk_score"],
            prediction=result["prediction"],
            latency_ms=latency_ms
        )
    except asyncio.TimeoutError:
        monitoring.record_request(status="timeout")
        raise HTTPException(status_code=408, detail="Request timeout")

@app.get("/metrics")
async def get_metrics():
    """Return the monitoring summary report (placeholder values as of now)."""
    return monitoring.generate_report()

if __name__ == "__main__":
    # Serve the API with uvicorn on all interfaces, port 8080.
    uvicorn.run(app, host="0.0.0.0", port=8080)