#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
EndoSight-UC AI推理服务器 - V3.0 堡垒安全版本
使用训练好的UCEIS评分模型进行内镜图像分析
集成企业级安全特性和性能优化
"""

import os
import sys
import logging
import traceback
import time
import uuid
import json
import io
from pathlib import Path
from typing import Dict, Any

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from PIL import Image
import numpy as np
import structlog

from fastapi import FastAPI, File, UploadFile, HTTPException, Request, Depends, status, Form
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.concurrency import run_in_threadpool
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.util import get_remote_address
from slowapi.errors import RateLimitExceeded

# 导入V4.0安全模块
from core.config import settings, validate_config
from core.security import SecurityValidator
from core.secure_loader import load_model_securely, ModelSecurityError
from core.ai_security import ai_security_validator, AISecurityError
from core.secure_logging import (
    SecureLoggerConfig, log_security_event, detect_anomalies,
    security_logger, log_manager
)
from core.schemas import (
    UCEISResponse, HealthResponse, ErrorResponse,
    ValidationErrorResponse, RateLimitResponse
)

# V4.0: configure secure structured logging before anything else logs
SecureLoggerConfig.configure_secure_logging()

logger = structlog.get_logger()

# Module-level state populated by load_model() at startup
model = None
device = None
tokenizer = None
server_start_time = time.time()

# UCEIS score (0-8) -> human-readable severity bucket
# NOTE(review): the model is created with num_classes=8 (classes 0-7),
# so the key 8 here may be unreachable — confirm intended class count.
SEVERITY_MAPPING = {
    0: "缓解期",
    1: "轻度",
    2: "轻度",
    3: "中度",
    4: "中度",
    5: "重度",
    6: "重度",
    7: "重度",
    8: "重度"
}

# [Fix M4] initialise the rate limiter, keyed by remote address
limiter = Limiter(
    key_func=get_remote_address,
    default_limits=[f"{settings.RATE_LIMIT_PER_MINUTE}/minute"]
)

# Create the FastAPI application
app = FastAPI(
    title="EndoSight-UC AI推理服务器 (V3.0 Hardened)",
    description="溃疡性结肠炎内镜严重程度指数(UCEIS)评分AI服务 - 企业级安全版本",
    version="3.0.0",
    docs_url="/docs",
    redoc_url="/redoc"
)

# Register SlowAPI state
# NOTE(review): the default handler registered here is later overridden by
# the custom @app.exception_handler(RateLimitExceeded) below — intentional?
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)

# [Fix M5] V4.0: hardened global exception handler
@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """Catch-all handler: logs and audits a failure without leaking details to the client."""
    source_ip = request.client.host if request.client else "unknown"
    agent = request.headers.get("User-Agent", "Unknown")
    error_name = type(exc).__name__

    # The structured log entry deliberately omits the exception message/traceback.
    logger.error("服务器内部错误",
                client_ip=source_ip,
                path=request.url.path,
                method=request.method,
                error_type=error_name)

    # Record a security event for the audit trail.
    log_security_event(
        event_type="server_error",
        severity="MEDIUM",
        client_ip=source_ip,
        details={
            "path": str(request.url.path),
            "method": request.method,
            "error_type": error_name,
            "user_agent_length": len(agent)
        },
        user_agent=agent
    )

    # Run anomaly detection over the raw error text; escalate on a hit.
    anomalies = detect_anomalies({
        "message": str(exc),
        "level": "ERROR",
        "client_ip": source_ip
    })
    if anomalies:
        log_security_event(
            event_type="anomaly_detected",
            severity="HIGH",
            client_ip=source_ip,
            details={"anomalies": anomalies}
        )

    # Opaque 500 to the client — no internals exposed.
    payload = ErrorResponse(
        message="服务器内部错误，请联系管理员",
        error_code="INTERNAL_SERVER_ERROR",
        timestamp=time.time()
    )
    return JSONResponse(
        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
        content=payload.dict(),
    )

@app.exception_handler(RateLimitExceeded)
async def rate_limit_handler(request: Request, exc: RateLimitExceeded):
    """V4.0: rate-limit exception handler.

    Logs the event, records a security event, and returns a structured 429.

    Bug fix: slowapi's ``RateLimitExceeded.detail`` is a plain string
    (e.g. "60 per 1 minute"), so the previous ``exc.detail.get(...)``
    raised ``AttributeError`` inside this handler (which the global handler
    then turned into a 500). We now call ``.get`` only when the detail is
    actually a mapping and otherwise fall back to the 60-second default.
    """
    client_ip = request.client.host if request.client else "unknown"
    user_agent = request.headers.get("User-Agent", "Unknown")

    # Defensive retry-after extraction — detail may be a str or a dict.
    detail = getattr(exc, "detail", None)
    retry_after = detail.get("retry-after", 60) if isinstance(detail, dict) else 60

    # V4.0: secure logging (truncate overly long user agents)
    logger.warning("速率限制触发",
                  client_ip=client_ip,
                  path=request.url.path,
                  user_agent=user_agent[:100])

    # V4.0: record a security event for the audit trail
    log_security_event(
        event_type="rate_limit_exceeded",
        severity="MEDIUM",
        client_ip=client_ip,
        details={
            "path": str(request.url.path),
            "retry_after": retry_after,
            "limit": settings.RATE_LIMIT_PER_MINUTE
        },
        user_agent=user_agent
    )

    return JSONResponse(
        status_code=status.HTTP_429_TOO_MANY_REQUESTS,
        content=RateLimitResponse(
            retry_after=retry_after,
            limit=settings.RATE_LIMIT_PER_MINUTE,
            timestamp=time.time()
        ).dict(),
    )

# [Fix H2] Strict CORS configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=settings.get_cors_origins(),  # key fix: no longer "*"
    allow_credentials=True,
    allow_methods=["GET", "POST"],
    allow_headers=["*"],
)

# Performance-monitoring / security-header middleware
@app.middleware("http")
async def add_security_headers(request: Request, call_next):
    """Time every request and stamp standard security headers on the response."""
    began = time.time()
    response = await call_next(request)
    elapsed = time.time() - began

    # Security headers plus the measured processing time.
    extra_headers = {
        "X-Process-Time": str(round(elapsed, 3)),
        "X-Content-Type-Options": "nosniff",
        "X-Frame-Options": "DENY",
        "X-XSS-Protection": "1; mode=block",
        "Server": "EndoSight-UC/3.0",
    }
    for header_name, header_value in extra_headers.items():
        response.headers[header_name] = header_value

    return response

# Make the training-time model package importable, then pull in the exact
# model class that was used during training.
sys.path.insert(0, str(Path(__file__).parent.parent / "uc_model"))
try:
    from models.fusion_model import EndoMultimodalModel
    logger.info("成功导入训练时使用的EndoMultimodalModel")
except ImportError as e:
    logger.error(f"无法导入训练模型: {e}")
    # The server cannot run without the model definition; abort immediately.
    sys.exit(1)

def load_model() -> bool:
    """Load the trained UCEIS model — V4.0 hardened edition.

    Populates the module-level ``model``, ``device`` and ``tokenizer``
    globals. Returns True on success, False on any failure (missing file,
    security-check rejection, or load error); the startup hook decides
    whether to abort based on this flag.
    """
    global model, device, tokenizer

    try:
        logger.info("开始安全加载UCEIS模型...")
        model_path = Path(settings.get_model_path())

        if not model_path.exists():
            logger.error(f"模型文件不存在: {model_path}")
            return False

        # Device selection: prefer CUDA when available.
        if torch.cuda.is_available():
            device = torch.device('cuda')
            logger.info(f"使用GPU: {torch.cuda.get_device_name(0)}")
        else:
            device = torch.device('cpu')
            logger.info("使用CPU进行推理")

        # Initialise the tokenizer for the BERT text branch.
        from transformers import BertTokenizer
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        logger.info("BERT tokenizer加载成功")

        # V4.0: use the hardened model loader for the weights file.
        logger.info("使用安全加载器验证模型文件...")

        try:
            # Build the (randomly initialised) model skeleton first.
            logger.info("初始化模型结构...")
            model_instance = EndoMultimodalModel(
                image_pretrained=True,
                text_pretrained='bert-base-uncased',
                feature_dim=256,
                num_classes=8
            )
            logger.info("模型结构创建成功")

            # V4.0: securely load the model weights.
            logger.info(f"安全加载模型权重: {model_path}")

            # The secure loader validates the file before deserialising it.
            state_dict = load_model_securely(str(model_path), device)

            # strict=True: any key mismatch between checkpoint and model fails loudly.
            model_instance.load_state_dict(state_dict, strict=True)
            logger.info("安全加载模型权重成功")

            # Move to the chosen device and freeze in eval mode.
            model_instance.to(device)
            model_instance.eval()

            # Record parameter counts for the startup log.
            total_params = sum(p.numel() for p in model_instance.parameters())
            trainable_params = sum(p.numel() for p in model_instance.parameters() if p.requires_grad)

            logger.info("模型安全加载成功！",
                       total_params=total_params,
                       trainable_params=trainable_params,
                       model_path=str(model_path))

            # Publish the ready model via the module-level global.
            model = model_instance

            return True

        except ModelSecurityError as e:
            logger.error(f"模型安全检查失败: {str(e)}")
            return False
        except Exception as e:
            logger.error(f"模型加载过程失败: {str(e)}")
            logger.error(f"错误详情: {traceback.format_exc()}")
            return False

    except Exception as e:
        logger.error(f"模型加载系统失败: {str(e)}")
        logger.error(f"错误详情: {traceback.format_exc()}")
        return False

# Built once at import time: resize -> tensor -> ImageNet normalisation.
# (Previously this pipeline object was rebuilt on every request.)
_PREPROCESS_TRANSFORM = transforms.Compose([
    transforms.Resize((512, 512)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])

def preprocess_image(image_bytes: bytes) -> torch.Tensor:
    """Decode raw image bytes into a (1, 3, 512, 512) model input tensor.

    Args:
        image_bytes: raw bytes of the uploaded image (already security-validated).

    Returns:
        A normalised float tensor with a leading batch dimension, moved to
        the module-level inference ``device``.

    Raises:
        ValueError: if decoding or transformation fails; the original
            exception is chained for diagnostics.
    """
    try:
        # Decode and force RGB so grayscale/paletted uploads also work.
        image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        logger.info(f"成功加载图像: 尺寸: {image.size}")

        image_tensor = _PREPROCESS_TRANSFORM(image).unsqueeze(0)  # add batch dim
        return image_tensor.to(device)

    except Exception as e:
        logger.error(f"图像预处理失败: {str(e)}")
        # Chain the cause so the full failure reason survives the re-raise.
        raise ValueError(f"图像预处理失败: {str(e)}") from e

def predict_uceis_score(image_tensor: torch.Tensor, text_input: str) -> Dict[str, Any]:
    """
    Predict the UCEIS score for a preprocessed image plus a text prompt.

    Runs the multimodal model under ``torch.no_grad`` and returns a dict with
    ``uceis_score`` (argmax class index), ``severity`` (mapped label),
    ``confidence`` (softmax probability of the predicted class) and
    ``detailed_scores`` (full probability vector).

    [Fix M5] This function must not raise HTTPException; it raises standard
    exceptions so the endpoint / global handler can translate them to 500s.

    NOTE(review): the model is built with num_classes=8, so the predicted
    class lies in 0..7 — a score of 8 (present in SEVERITY_MAPPING) can
    never be produced. Confirm whether UCEIS 0-8 needs 9 output classes.
    """
    try:
        start_time = time.time()

        with torch.no_grad():
            # Tokenise the free-text prompt for the BERT text branch.
            text_inputs = tokenizer(
                text_input,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=128
            )

            # Move every tokenizer tensor onto the inference device.
            for k, v in text_inputs.items():
                text_inputs[k] = v.to(device)

            # Model forward pass
            outputs = model(image_tensor, text_inputs)  # returns (B, 8) logits

            # Convert logits to class probabilities
            logits = outputs.squeeze(0)  # drop the batch dim -> (8,)
            probabilities = F.softmax(logits, dim=0)

            # The predicted class index doubles as the UCEIS score.
            predicted_class = torch.argmax(probabilities).item()
            uceis_score = predicted_class

            # Confidence = probability mass on the predicted class.
            confidence = probabilities[predicted_class].item()

            # Keep the full probability vector for downstream display/debugging.
            detailed_scores = {
                'total': {
                    'score': uceis_score,
                    'confidence': confidence,
                    'probabilities': probabilities.cpu().numpy().tolist()
                }
            }

            # Human-readable severity bucket.
            severity = SEVERITY_MAPPING.get(uceis_score, "未知")

            processing_time = time.time() - start_time

            logger.info("推理完成",
                       uceis_score=uceis_score,
                       severity=severity,
                       confidence=round(confidence, 3),
                       processing_time=round(processing_time, 3))

            return {
                'uceis_score': uceis_score,
                'severity': severity,
                'confidence': confidence,
                'detailed_scores': detailed_scores
            }

    except Exception as e:
        logger.error(f"模型推理失败: {e}", exc_info=True)
        # Re-raise as a standard exception; the caller maps it to HTTP 500.
        raise ValueError(f"Inference failed: {e}")

# Application startup hook
@app.on_event("startup")
async def startup_event():
    """Validate configuration and load the model before serving traffic."""
    try:
        # Fail fast on an invalid configuration.
        validate_config()
        logger.info("配置验证通过")

        # Load the model; the server is useless without it.
        if load_model():
            logger.info("✅ V3.0推理服务器启动成功！")
            return

        logger.error("❌ 模型加载失败，服务器将无法正常工作")
        raise RuntimeError("模型加载失败")

    except Exception as e:
        logger.error(f"启动失败: {e}")
        raise

# Endpoint: health check (V3.0)
@app.get("/health", response_model=HealthResponse)
@limiter.limit("60/minute")  # the health probe is rate-limited as well
async def health_check(request: Request):
    """
    [Fix L9] Minimal-disclosure health probe.

    Reports only an overall status plus uptime; internal details such as
    device or model identity are intentionally withheld from the response.
    """
    try:
        # 503 while the model has not been (or failed to be) loaded.
        if model is None or device is None:
            raise HTTPException(
                status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
                detail="模型未加载",
            )

        elapsed = time.time() - server_start_time
        return HealthResponse(
            status="healthy",
            message=f"服务正常运行 {elapsed:.0f} 秒"
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"健康检查失败: {e}")
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail="服务不可用"
        )

# Endpoint: core inference (V3.0)
@app.post("/v1/predict", response_model=UCEISResponse)
@limiter.limit(f"{settings.RATE_LIMIT_PER_MINUTE}/minute")  # [Fix M4]
async def predict_v3(
    request: Request,
    text_input: str = Form(default="endoscopic image of colon mucosa for UCEIS scoring"),
    file: UploadFile = File(...)
):
    """
    V4.0 hardened prediction endpoint:
    1. (M4) rate limiting
    2. (H3) dependency-style file validation
    3. (HIGH-4) AI adversarial-example detection
    4. (P2) async thread-pool inference
    5. (P1) Pydantic response model
    """
    start_time = time.time()
    task_id = str(uuid.uuid4())

    logger.info("开始处理预测请求", task_id=task_id, client_ip=request.client.host)

    try:
        # 1. Security-validate the uploaded file (size/type/content checks).
        validator = SecurityValidator()
        image_bytes, metadata = await validator.validate_upload(file)

        # 2. V4.0: AI security screen — adversarial-perturbation detection.
        if settings.ENABLE_AI_SECURITY_CHECKS:
            is_safe, security_report = ai_security_validator.validate_input_security(
                image_bytes, request.client.host
            )

            if not is_safe:
                logger.warning("AI安全检测失败",
                             task_id=task_id,
                             client_ip=request.client.host,
                             risk_factors=security_report.get('risk_factors', []))
                raise HTTPException(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    detail="输入图像未通过AI安全检测，可能包含对抗性扰动"
                )

            logger.info("AI安全检测通过",
                       task_id=task_id,
                       security_score=security_report.get('adversarial_detection', {}).get('risk_score', 0))

        # 3. Preprocessing (synchronous, but fast).
        image_tensor = preprocess_image(image_bytes)

        # 4. (Fix P2) Offload the 2-3 second blocking inference to the thread pool.
        result_data = await run_in_threadpool(
            predict_uceis_score,
            image_tensor,
            text_input
        )

        processing_time = time.time() - start_time

        # 5. (Fix P1) Assemble the typed Pydantic response.
        response = UCEISResponse(
            task_id=task_id,
            processing_time=round(processing_time, 3),
            success=True,
            message="预测成功",
            **result_data
        )

        logger.info("预测完成",
                   task_id=task_id,
                   uceis_score=result_data['uceis_score'],
                   confidence=round(result_data['confidence'], 3),
                   processing_time=round(processing_time, 3))

        return response

    except ValueError as e:
        # Inference failure raised by predict_uceis_score.
        logger.error(f"推理失败: {e}", task_id=task_id)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="AI推理服务暂时不可用"
        )
    except HTTPException:
        raise
    except Exception as e:
        # Anything else becomes an opaque HTTP 500.
        logger.error(f"任务处理失败: {e}", task_id=task_id, exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="服务暂时不可用"
        )

# Root path
@app.get("/")
async def root():
    """Service banner: name, version, endpoint map and feature list."""
    endpoint_map = {
        "/health": "健康检查",
        "/v1/predict": "UCEIS评分预测",
        "/docs": "API文档 (Swagger)",
        "/redoc": "API文档 (ReDoc)"
    }
    feature_list = [
        "企业级安全防护",
        "智能速率限制",
        "深度文件验证",
        "结构化日志",
        "性能监控"
    ]
    return {
        "service": "EndoSight-UC AI推理服务器 (V3.0 Hardened)",
        "status": "running",
        "version": "3.0.0",
        "endpoints": endpoint_map,
        "features": feature_list
    }

# Script entry point: serve the app with uvicorn on the configured host/port.
if __name__ == "__main__":
    import uvicorn
    logger.info("启动EndoSight-UC AI推理服务器 V3.0...")
    uvicorn.run(
        app,
        host=settings.HOST,
        port=settings.PORT,
        reload=False,  # hot-reload disabled for production safety
        log_level=settings.LOG_LEVEL.lower()
    )