"""
FastAPI应用
提供BERT意图识别的HTTP服务接口
"""

from fastapi import FastAPI, HTTPException, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from contextlib import asynccontextmanager
from pydantic import BaseModel, Field, field_validator
import os
import sys
import logging
from pathlib import Path
import json
import time
from typing import Dict, List, Optional, Any
import asyncio

# 添加项目根目录到路径
project_root = Path(__file__).parent.parent.parent
sys.path.append(str(project_root))

from src.models.predictor import load_predictor, PredictionPipeline
from src.utils.logger import setup_logger

# 设置日志
logger = setup_logger(__name__)

# Configuration — all values overridable via environment variables
MODEL_PATH = os.getenv('MODEL_PATH', './final-bert-intent-model')  # directory of the fine-tuned BERT model
CONFIDENCE_THRESHOLD = float(os.getenv('CONFIDENCE_THRESHOLD', '0.5'))  # default threshold for /predict use_threshold mode
MAX_TEXT_LENGTH = int(os.getenv('MAX_TEXT_LENGTH', '512'))  # per-text character limit enforced by request validators
MAX_BATCH_SIZE = int(os.getenv('MAX_BATCH_SIZE', '100'))  # upper bound on texts per batch request

# Global state, populated by initialize_model() during application startup.
predictor = None            # model predictor instance; None => service runs degraded
prediction_pipeline = None  # higher-level pipeline (conversation / distribution endpoints)
model_info = {}             # metadata dict returned by predictor.get_model_info()


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan: load the model before serving, log on shutdown.

    If model loading fails the service still starts, but runs in a
    degraded mode where prediction endpoints return errors.
    """
    logger.info("启动BERT意图识别API服务")
    if not await initialize_model():
        logger.warning("模型初始化失败，服务将在受限模式下运行")

    yield

    logger.info("关闭BERT意图识别API服务")

# Pydantic数据模型
class PredictRequest(BaseModel):
    """Request model for single-text intent prediction."""
    text: str = Field(..., description="要预测的文本", max_length=MAX_TEXT_LENGTH)
    return_confidence: bool = Field(True, description="是否返回置信度")
    return_all_scores: bool = Field(False, description="是否返回所有类别的分数")
    use_threshold: bool = Field(False, description="是否使用置信度阈值判断")
    threshold: float = Field(CONFIDENCE_THRESHOLD, ge=0.0, le=1.0, description="置信度阈值")

    @field_validator('text')
    @classmethod
    def text_not_empty(cls, v):
        """Reject empty / whitespace-only text; return the stripped value."""
        stripped = v.strip() if v else ''
        if not stripped:
            raise ValueError('文本不能为空')
        return stripped


class BatchPredictRequest(BaseModel):
    """Request model for batch intent prediction."""
    # Pydantic v2 (this file already uses `field_validator`) bounds list
    # size with `max_length`; the v1 keyword `max_items` is deprecated.
    texts: List[str] = Field(..., description="要预测的文本列表", max_length=MAX_BATCH_SIZE)
    batch_size: int = Field(32, ge=1, le=64, description="批处理大小")

    @field_validator('texts')
    @classmethod
    def texts_not_empty(cls, v):
        """Validate every entry: non-blank and within MAX_TEXT_LENGTH.

        Returns the list with each entry coerced to str and stripped.
        """
        if not v:
            raise ValueError('文本列表不能为空')
        for i, text in enumerate(v):
            if not text or not str(text).strip():
                raise ValueError(f'第{i+1}个文本不能为空')
            if len(str(text)) > MAX_TEXT_LENGTH:
                raise ValueError(f'第{i+1}个文本长度超过限制({MAX_TEXT_LENGTH}字符)')
        return [str(text).strip() for text in v]


class ConversationRequest(BaseModel):
    """Request model for conversation-sequence prediction."""
    # Pydantic v2 uses `max_length` for list size; `max_items` is deprecated.
    conversation: List[str] = Field(..., description="对话文本列表", max_length=50)

    @field_validator('conversation')
    @classmethod
    def conversation_valid(cls, v):
        """Reject empty conversation lists."""
        if not v:
            raise ValueError('对话列表不能为空')
        return v


class DistributionRequest(BaseModel):
    """Request model for intent-distribution analysis."""
    # Pydantic v2 uses `max_length` for list size; `max_items` is deprecated.
    texts: List[str] = Field(..., description="要分析的文本列表", max_length=1000)

    @field_validator('texts')
    @classmethod
    def texts_valid(cls, v):
        """Reject empty text lists."""
        if not v:
            raise ValueError('文本列表不能为空')
        return v


class BenchmarkRequest(BaseModel):
    """Request model for the performance benchmark endpoint."""
    # Pydantic v2 uses `max_length` for list size; `max_items` is deprecated.
    test_texts: List[str] = Field(..., description="测试文本列表", max_length=50)
    num_runs: int = Field(3, ge=1, le=10, description="运行次数")

    @field_validator('test_texts')
    @classmethod
    def test_texts_valid(cls, v):
        """Reject empty test-text lists."""
        if not v:
            raise ValueError('测试文本列表不能为空')
        return v


# 响应模型
class PredictResponse(BaseModel):
    """Response model for a single prediction.

    Optional fields (confidence, predicted_class_id, all_scores) are
    populated only when the predictor actually produced them, which
    depends on the request flags (return_confidence / return_all_scores).
    """
    success: bool = Field(True, description="请求是否成功")
    text: str = Field(..., description="输入文本")
    intent: str = Field(..., description="预测的意图")
    confidence: Optional[float] = Field(None, description="置信度")
    predicted_class_id: Optional[int] = Field(None, description="预测类别ID")
    all_scores: Optional[Dict[str, float]] = Field(None, description="所有类别的分数")
    timestamp: float = Field(..., description="处理时间戳")


class BatchPredictResponse(BaseModel):
    """Response model for batch prediction.

    `results` carries one raw predictor dict per input text, in order.
    """
    success: bool = Field(True, description="请求是否成功")
    results: List[Dict[str, Any]] = Field(..., description="预测结果列表")
    total_texts: int = Field(..., description="总文本数量")
    processing_time: float = Field(..., description="处理时间(秒)")
    timestamp: float = Field(..., description="处理时间戳")


class HealthResponse(BaseModel):
    """Response model for the basic health check."""

    # Pydantic v2 reserves the `model_` field-name prefix by default and
    # warns about `model_loaded`; disabling protected namespaces keeps the
    # existing public field name without the warning.
    model_config = {"protected_namespaces": ()}

    status: str = Field("healthy", description="服务状态")
    message: str = Field(..., description="状态消息")
    model_loaded: bool = Field(..., description="模型是否已加载")
    timestamp: float = Field(..., description="时间戳")


class ModelInfoResponse(BaseModel):
    """Response model for model metadata."""

    # Pydantic v2 reserves the `model_` field-name prefix by default and
    # warns about `model_info`; disabling protected namespaces keeps the
    # existing public field name without the warning.
    model_config = {"protected_namespaces": ()}

    success: bool = Field(True, description="请求是否成功")
    model_info: Dict[str, Any] = Field(..., description="模型信息")


class ErrorResponse(BaseModel):
    """Uniform error envelope used by all exception handlers."""
    success: bool = Field(False, description="请求是否成功")
    error: str = Field(..., description="错误信息")
    timestamp: Optional[float] = Field(None, description="时间戳")


# Create the FastAPI application; interactive docs at /docs and /redoc.
app = FastAPI(
    title="BERT意图识别API",
    description="基于BERT的中文对话机器人意图识别服务",
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc",
    openapi_url="/openapi.json",
    lifespan=lifespan  # loads the model on startup (see lifespan above)
)

# CORS configuration
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # TODO: restrict to specific domains in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


async def initialize_model():
    """Load the predictor and prediction pipeline without blocking the event loop.

    Returns:
        bool: True when both the predictor and the pipeline loaded
        successfully; False when the model path is missing or any
        load-time exception occurred (the error is logged).
    """
    global predictor, prediction_pipeline, model_info

    try:
        logger.info(f"正在初始化模型，路径: {MODEL_PATH}")

        # Fail fast when the model directory is absent.
        if not Path(MODEL_PATH).exists():
            logger.error(f"模型路径不存在: {MODEL_PATH}")
            return False

        # get_running_loop() is the non-deprecated API inside a coroutine
        # (get_event_loop() is deprecated here since Python 3.10).
        loop = asyncio.get_running_loop()

        # Both loads are blocking (disk I/O + model construction); run them
        # in the default thread-pool executor so startup stays responsive.
        predictor = await loop.run_in_executor(None, load_predictor, MODEL_PATH)
        prediction_pipeline = await loop.run_in_executor(
            None, PredictionPipeline, MODEL_PATH, CONFIDENCE_THRESHOLD
        )

        model_info = predictor.get_model_info()

        logger.info("模型初始化成功")
        logger.info(f"支持的意图: {model_info.get('supported_intents', [])}")

        return True

    except Exception as e:
        # Any failure leaves the globals unset; the service runs degraded.
        logger.error(f"模型初始化失败: {e}")
        return False


@app.get("/", response_model=HealthResponse, summary="健康检查", description="检查API服务状态")
async def health_check():
    """Basic liveness probe reporting whether the model is loaded."""
    payload = {
        "status": "healthy",
        "message": "BERT意图识别服务运行正常",
        "model_loaded": predictor is not None,
        "timestamp": time.time(),
    }
    return HealthResponse(**payload)


@app.get("/model/info", response_model=ModelInfoResponse, summary="获取模型信息", description="获取当前加载的模型详细信息")
async def get_model_info():
    """Return metadata about the currently loaded model (500 if not loaded)."""
    if predictor is None:
        raise HTTPException(status_code=500, detail="模型未加载")
    return ModelInfoResponse(success=True, model_info=model_info)


@app.post("/predict", response_model=PredictResponse, summary="意图预测", description="对单个文本进行意图预测")
async def predict_intent(request: PredictRequest):
    """Predict the intent of a single text.

    Runs the (blocking) model inference in a thread-pool executor so the
    event loop stays responsive. Raises HTTP 500 when the model is not
    loaded or the predictor reports an error.
    """
    try:
        if predictor is None:
            raise HTTPException(status_code=500, detail="模型未加载，请检查服务状态")

        # get_running_loop() is the correct (non-deprecated) call inside a
        # coroutine; get_event_loop() is deprecated here since Python 3.10.
        loop = asyncio.get_running_loop()

        if request.use_threshold:
            # Threshold mode: predictor applies the confidence cutoff itself.
            result = await loop.run_in_executor(
                None,
                predictor.predict_with_threshold,
                request.text,
                request.threshold
            )
        else:
            result = await loop.run_in_executor(
                None,
                predictor.predict_single,
                request.text,
                request.return_confidence,
                request.return_all_scores
            )

        # The predictor signals failures via an 'error' key in its result.
        if 'error' in result:
            raise HTTPException(status_code=500, detail=result['error'])

        response_data = {
            "success": True,
            "text": result["text"],
            "intent": result["intent"],
            "timestamp": time.time()
        }

        # Include optional fields only when the predictor produced them.
        for key in ("confidence", "predicted_class_id", "all_scores"):
            if key in result:
                response_data[key] = result[key]

        return PredictResponse(**response_data)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"预测过程中发生错误: {e}")
        raise HTTPException(status_code=500, detail="服务内部错误")


@app.post("/predict/batch", response_model=BatchPredictResponse, summary="批量意图预测", description="对多个文本进行批量意图预测")
async def batch_predict(request: BatchPredictRequest):
    """Predict intents for a list of texts in one call.

    Inference runs in a thread-pool executor; the measured processing
    time covers only the prediction itself.
    """
    try:
        if predictor is None:
            raise HTTPException(status_code=500, detail="模型未加载，请检查服务状态")

        # Non-deprecated way to obtain the loop inside a coroutine.
        loop = asyncio.get_running_loop()
        start_time = time.time()

        results = await loop.run_in_executor(
            None,
            predictor.predict_batch,
            request.texts,
            request.batch_size
        )

        processing_time = time.time() - start_time

        return BatchPredictResponse(
            success=True,
            results=results,
            total_texts=len(request.texts),
            processing_time=processing_time,
            timestamp=time.time()
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"批量预测过程中发生错误: {e}")
        raise HTTPException(status_code=500, detail="服务内部错误")


@app.post("/predict/conversation", summary="对话意图预测", description="对对话序列进行意图预测")
async def predict_conversation(request: ConversationRequest):
    """Predict intents for each turn of a conversation sequence."""
    try:
        if prediction_pipeline is None:
            raise HTTPException(status_code=500, detail="预测流水线未初始化")

        # Non-deprecated way to obtain the loop inside a coroutine;
        # the pipeline call is blocking, so run it in the executor.
        loop = asyncio.get_running_loop()
        results = await loop.run_in_executor(
            None,
            prediction_pipeline.process_conversation,
            request.conversation
        )

        return {
            "success": True,
            "conversation_results": results,
            "total_turns": len(request.conversation),
            "timestamp": time.time()
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"对话预测过程中发生错误: {e}")
        raise HTTPException(status_code=500, detail="服务内部错误")


@app.post("/analyze/distribution", summary="意图分布分析", description="分析文本集合的意图分布统计")
async def analyze_intent_distribution(request: DistributionRequest):
    """Compute intent-distribution statistics over a collection of texts."""
    try:
        if prediction_pipeline is None:
            raise HTTPException(status_code=500, detail="预测流水线未初始化")

        # Non-deprecated way to obtain the loop inside a coroutine;
        # the analysis is blocking, so run it in the executor.
        loop = asyncio.get_running_loop()
        distribution = await loop.run_in_executor(
            None,
            prediction_pipeline.analyze_intent_distribution,
            request.texts
        )

        return {
            "success": True,
            "distribution": distribution,
            "timestamp": time.time()
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"意图分布分析过程中发生错误: {e}")
        raise HTTPException(status_code=500, detail="服务内部错误")


@app.post("/benchmark", summary="性能基准测试", description="运行API性能基准测试")
async def benchmark_performance(request: BenchmarkRequest):
    """Run the predictor's performance benchmark over the supplied texts."""
    try:
        if predictor is None:
            raise HTTPException(status_code=500, detail="模型未加载")

        # Non-deprecated way to obtain the loop inside a coroutine;
        # the benchmark is long-running, so keep it off the event loop.
        loop = asyncio.get_running_loop()
        benchmark_stats = await loop.run_in_executor(
            None,
            predictor.benchmark_performance,
            request.test_texts,
            request.num_runs
        )

        return {
            "success": True,
            "benchmark_stats": benchmark_stats,
            "timestamp": time.time()
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"性能基准测试过程中发生错误: {e}")
        raise HTTPException(status_code=500, detail="服务内部错误")


@app.exception_handler(HTTPException)
async def http_exception_handler(request, exc):
    """Convert HTTPException into the uniform ErrorResponse envelope."""
    return JSONResponse(
        status_code=exc.status_code,
        # str() guards against non-string `detail` payloads, since
        # ErrorResponse.error is declared as str.
        content=ErrorResponse(
            success=False,
            error=str(exc.detail),
            timestamp=time.time()
        ).model_dump()  # .dict() is deprecated in Pydantic v2
    )


@app.exception_handler(ValueError)
async def value_error_handler(request, exc):
    """Map ValueError (e.g. raised by validators) to a 400 error envelope."""
    return JSONResponse(
        status_code=400,
        content=ErrorResponse(
            success=False,
            error=str(exc),
            timestamp=time.time()
        ).model_dump()  # .dict() is deprecated in Pydantic v2
    )


@app.exception_handler(Exception)
async def general_exception_handler(request, exc):
    """Last-resort handler: log with traceback, return a generic 500."""
    # exc_info preserves the traceback, which the original f-string log
    # dropped; lazy %-formatting avoids building the message when disabled.
    logger.error("未处理的异常: %s", exc, exc_info=True)
    return JSONResponse(
        status_code=500,
        content=ErrorResponse(
            success=False,
            error="服务器内部错误",
            timestamp=time.time()
        ).model_dump()  # .dict() is deprecated in Pydantic v2
    )


# Additional API endpoints
@app.get("/openapi.json", include_in_schema=False)
async def get_openapi():
    """Return the OpenAPI specification.

    NOTE(review): FastAPI already serves /openapi.json (openapi_url is
    set at app creation), and that built-in route is registered first —
    this handler looks redundant; confirm before relying on it.
    """
    return app.openapi()


@app.get("/health/detailed", summary="详细健康检查", description="获取详细的服务健康状态信息")
async def detailed_health_check():
    """Extended health report: service config plus model metadata when loaded."""
    loaded = predictor is not None
    report = {
        "status": "healthy" if loaded else "degraded",
        "timestamp": time.time(),
        "model_loaded": loaded,
        "model_path": MODEL_PATH,
        "confidence_threshold": CONFIDENCE_THRESHOLD,
        "max_text_length": MAX_TEXT_LENGTH,
        "max_batch_size": MAX_BATCH_SIZE,
    }

    if loaded:
        report["model_info"] = model_info
        report["supported_intents"] = model_info.get('supported_intents', [])

    return report


def create_app(config=None):
    """Application factory returning the module-level FastAPI app.

    The *config* argument is accepted for forward compatibility but is
    currently not applied (TODO).
    """
    return app


if __name__ == '__main__':
    import uvicorn

    # Configure root logging for a direct (non-uvicorn-managed) run.
    logging.basicConfig(level=logging.INFO)

    # Server settings from environment variables.
    host = os.getenv('HOST', '0.0.0.0')
    port = int(os.getenv('PORT', '5000'))
    debug = os.getenv('DEBUG', 'False').lower() == 'true'
    reload = debug  # enable hot-reload in development mode

    logger.info(f"启动BERT意图识别FastAPI服务")
    logger.info(f"模型路径: {MODEL_PATH}")
    logger.info(f"置信度阈值: {CONFIDENCE_THRESHOLD}")
    logger.info(f"服务地址: http://{host}:{port}")
    logger.info(f"API文档: http://{host}:{port}/docs")
    logger.info(f"ReDoc文档: http://{host}:{port}/redoc")

    # Start the server; the import string must match this module's package
    # path so that reload workers can re-import the app.
    uvicorn.run(
        "src.api.app:app",
        host=host,
        port=port,
        reload=reload,
        log_level="info" if not debug else "debug",
        access_log=True
    ) 