"""
FinBERT API服务
"""
import os
import logging
from contextlib import asynccontextmanager
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional, List, Union, Dict
import uvicorn
from prometheus_client import Counter, Histogram, generate_latest, CONTENT_TYPE_LATEST
from starlette.responses import Response

from .model_loader import FinbertModelLoader
from .inference import FinbertInference

# Logging: level comes from the LOG_LEVEL env var (default INFO).
logging.basicConfig(
    level=os.getenv("LOG_LEVEL", "INFO"),
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Prometheus metrics.
# Request counter, labelled by endpoint and status
# ("received" / "success" / "error" — see analyze_sentiment).
request_counter = Counter(
    "finbert_requests_total",
    "Total number of requests",
    ["endpoint", "status"]
)

# Per-endpoint request latency histogram.
request_duration = Histogram(
    "finbert_request_duration_seconds",
    "Request duration in seconds",
    ["endpoint"]
)

# Globals populated by the lifespan handler at startup; None until the model
# has been loaded, which is what /health's model_loaded flag reports.
model_loader: Optional[FinbertModelLoader] = None
inference_engine: Optional[FinbertInference] = None


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan handler.

    On startup, loads the FinBERT model/tokenizer and populates the
    module-level ``model_loader`` and ``inference_engine`` globals that the
    request handlers read. On shutdown, runs the cleanup section after
    ``yield`` (currently only logs; no resources are explicitly released).

    Raises:
        Any exception from model loading — re-raised so the app fails fast
        instead of serving without a model.
    """
    global model_loader, inference_engine
    
    # Startup: load the model before the app starts serving requests.
    logger.info("正在加载模型...")
    try:
        model_loader = FinbertModelLoader()
        model_loader.load_model()
        inference_engine = FinbertInference(
            model_loader.get_model(),
            model_loader.get_tokenizer(),
            model_loader.get_device()
        )
        logger.info("模型加载完成")
    except Exception as e:
        # Log, then propagate: a service without a model must not start.
        logger.error(f"模型加载失败: {str(e)}")
        raise
    
    yield  # the application serves requests while suspended here
    
    # Shutdown: cleanup hook (log only for now).
    logger.info("正在清理资源...")


# FastAPI application; model loading is driven by the lifespan handler above.
app = FastAPI(
    title="FinBERT API",
    description="基于FinBERT的金融文本情感分析服务",
    version="1.0.0",
    lifespan=lifespan
)

# CORS middleware — currently wide open.
# NOTE(review): the CORS spec forbids combining a wildcard origin with
# credentials; browsers will reject credentialed cross-origin responses when
# allow_origins=["*"] and allow_credentials=True are both set. Confirm whether
# credentialed requests are actually needed, and if so list explicit origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class HealthResponse(BaseModel):
    """Response schema for GET /health."""
    status: str          # always "healthy" as currently implemented
    model_loaded: bool   # True once the lifespan handler has set both globals
    device: str          # best *available* device ("npu"/"cuda"/"cpu"); not
                         # necessarily the device the model was loaded onto
    version: str         # service version string
    model_info: dict     # static metadata: name / type / max_length
    labels: List[str]    # sentiment labels the model can emit ([] if unloaded)


class SentimentAnalysisRequest(BaseModel):
    """Request schema for the sentiment / classify endpoints."""
    text: Union[str, List[str]]                 # one text or a batch of texts
    return_all_scores: Optional[bool] = False   # also return per-label scores
    max_length: Optional[int] = 512             # tokenizer truncation length


class SentimentAnalysisResponse(BaseModel):
    """One classification result, as returned by /api/v1/sentiment.

    ``text`` and ``scores`` are optional extras. They must be declared here:
    FastAPI's ``response_model`` filtering silently drops any key the model
    does not declare, so without a ``scores`` field the
    ``return_all_scores=True`` request option could never reach the client.
    """
    text: Optional[str] = None                  # echoed input text, if provided
    label: str                                  # predicted sentiment label
    score: float                                # confidence of the predicted label
    scores: Optional[Dict[str, float]] = None   # per-label scores when requested


@app.get("/health", response_model=HealthResponse)
async def health_check():
    """Health probe.

    Reports whether the model is loaded and which accelerator is available,
    probed in order of preference: NPU > CUDA > CPU. Note this is the best
    *available* device, not necessarily the one the model was loaded onto.

    Fix vs. previous version: the ``device_available`` flag was computed but
    never used; it has been removed.
    """
    import torch  # local import: only needed here, for device detection

    if hasattr(torch, 'npu') and torch.npu.is_available():
        device_name = "npu"
    elif torch.cuda.is_available():
        device_name = "cuda"
    else:
        device_name = "cpu"

    # Empty label list until the lifespan handler has built the engine.
    labels = inference_engine.get_labels() if inference_engine else []

    return {
        "status": "healthy",
        "model_loaded": model_loader is not None and inference_engine is not None,
        "device": device_name,
        "version": "1.0.0",
        "model_info": {
            "name": "finbert",
            "type": "sentiment-analysis",
            "max_length": 512
        },
        "labels": labels
    }


@app.get("/metrics")
async def metrics():
    """Expose Prometheus metrics in the text exposition format."""
    payload = generate_latest()
    return Response(content=payload, media_type=CONTENT_TYPE_LATEST)


@app.post("/api/v1/sentiment", response_model=List[SentimentAnalysisResponse])
async def analyze_sentiment(request: SentimentAnalysisRequest):
    """Sentiment analysis endpoint.

    Accepts a single string or a list of strings and returns one
    classification result per input text.

    Fixes vs. previous version: removed the 30-line commented-out single-text
    path (and the orphaned indentation it left behind), added a 503 guard for
    requests arriving before the model is loaded (previously an AttributeError
    turned into a misleading 500), and wired up the ``request_duration``
    histogram, which was defined but never observed.

    Raises:
        HTTPException: 503 if the model is not loaded yet; 500 on inference
            failure.
    """
    request_counter.labels(endpoint="/api/v1/sentiment", status="received").inc()

    # Guard *before* the try block so the 503 is not swallowed and
    # re-wrapped as a 500 by the generic handler below.
    if inference_engine is None:
        request_counter.labels(endpoint="/api/v1/sentiment", status="error").inc()
        raise HTTPException(status_code=503, detail="Model not loaded")

    try:
        # Time only the inference itself, per endpoint.
        with request_duration.labels(endpoint="/api/v1/sentiment").time():
            # NOTE(review): request.text may be a single str; batch_classify is
            # assumed to accept it (the dedicated single-text path was removed
            # upstream) — confirm against FinbertInference.
            result = inference_engine.batch_classify(
                texts=request.text,
                return_all_scores=request.return_all_scores,
                max_length=request.max_length
            )

        request_counter.labels(endpoint="/api/v1/sentiment", status="success").inc()
        return result

    except Exception as e:
        logger.error(f"情感分析失败: {str(e)}")
        request_counter.labels(endpoint="/api/v1/sentiment", status="error").inc()
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/v1/classify")
async def classify_text(request: SentimentAnalysisRequest):
    """Text classification endpoint — an alias that delegates to /api/v1/sentiment."""
    result = await analyze_sentiment(request)
    return result


@app.get("/api/v1/labels")
async def get_labels():
    """Return the sentiment labels the loaded model supports.

    Yields an empty list (count 0) when the model has not been loaded yet.
    """
    try:
        if inference_engine:
            labels = inference_engine.get_labels()
        else:
            labels = []
        return {
            "labels": labels,
            "count": len(labels)
        }
    except Exception as e:
        logger.error(f"获取标签失败: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


if __name__ == "__main__":
    # NOTE(review): this module uses package-relative imports
    # (from .model_loader ...), so running the file directly — and the
    # "main:app" import string uvicorn uses to spawn workers — only works if
    # the deployment makes a bare `main` module importable; confirm entrypoint.
    uvicorn.run(
        "main:app",
        host="0.0.0.0",
        port=int(os.getenv("PORT", 8000)),
        workers=int(os.getenv("WORKERS", 1)),
        log_level=os.getenv("LOG_LEVEL", "info").lower(),
    )

