import asyncio
import os
import secrets
import uuid
from typing import Any, Dict, List, Optional

import uvicorn
from dotenv import load_dotenv
from fastapi import Depends, FastAPI, Header, HTTPException
from pydantic import BaseModel, Field
from vllm import LLM

# Load configuration from the .env file, with sensible local defaults.
load_dotenv()
MODEL_PATH = os.getenv("MODEL_PATH", "./models")
DEFAULT_MODEL = os.getenv("MODEL_NAME", "bge-reranker-v2-m3")
# No default on purpose: authentication must be configured explicitly.
AUTH_TOKEN = os.getenv("AUTH_TOKEN")

app = FastAPI()

# Request model
class RerankRequest(BaseModel):
    """Request payload for POST /v1/rerank."""

    # Model name or local path; when omitted, MODEL_NAME from .env is used.
    model: Optional[str] = Field(
        default=None,
        description="Hugging Face 模型名或本地路径，可选，优先使用 .env 中 MODEL_NAME",
    )
    # Query string the candidate documents are scored against.
    query: str
    # Candidate documents to rerank.
    documents: List[str]
    # Number of highest-scoring documents to return.
    top_n: int = 4
    # Echo each document's text back in the response when True.
    return_documents: bool = False
    # Chunking parameters (not consumed by the visible handler code).
    max_chunks_per_doc: int = 1024
    overlap_tokens: int = 80

# Response models
class DocumentItem(BaseModel):
    """Wrapper for a returned document's text."""

    # Default added: under Pydantic v2 an `Optional[str]` field with no
    # default is still *required*; None keeps the field truly optional.
    text: Optional[str] = None

class ResultItem(BaseModel):
    """One reranked document with its relevance score."""

    # None unless the caller requested return_documents=True. Default added:
    # under Pydantic v2 an Optional field with no default is still required.
    document: Optional[DocumentItem] = None
    # Position of this document in the original request's `documents` list.
    index: int
    relevance_score: float

class TokenUsage(BaseModel):
    """Token accounting for a rerank call."""

    input_tokens: int
    output_tokens: int

class RerankResponse(BaseModel):
    """Response payload for POST /v1/rerank."""

    # Unique id generated per request (UUID4 string).
    id: str
    # Reranked documents, highest score first.
    results: List[ResultItem]
    tokens: TokenUsage

# In-memory cache for loaded models: model name -> vLLM instance,
# so each model is constructed at most once per process.
_model_cache: Dict[str, LLM] = {}
# Serializes access to _model_cache so concurrent requests cannot
# trigger duplicate loads of the same model.
_cache_lock = asyncio.Lock()

async def get_model(model_name: str) -> LLM:
    """Return a cached vLLM scoring model, loading it on first use.

    Args:
        model_name: Directory name of the model under MODEL_PATH.

    Raises:
        HTTPException 404: the resolved model path does not exist.
    """
    async with _cache_lock:
        cached = _model_cache.get(model_name)
        if cached is not None:
            return cached

        model_path = os.path.join(MODEL_PATH, model_name)
        if not os.path.exists(model_path):
            raise HTTPException(status_code=404, detail=f"模型路径 {model_path} 不存在")

        # LLM() construction is heavy, synchronous work; run it in the
        # default executor so the event loop is not blocked. The lock still
        # guarantees each model is loaded at most once.
        loop = asyncio.get_running_loop()
        llm = await loop.run_in_executor(
            None,
            lambda: LLM(model=model_path, task='score', enforce_eager=True),
        )
        _model_cache[model_name] = llm
        return llm

async def verify_token(authorization: str = Header(...)):
    """Validate a `Bearer <token>` Authorization header against AUTH_TOKEN.

    Raises:
        HTTPException 500: AUTH_TOKEN is not configured on the server
            (previously this silently rejected every request with 401).
        HTTPException 401: malformed header, wrong scheme, or wrong token.
    """
    if AUTH_TOKEN is None:
        raise HTTPException(status_code=500, detail="Server auth token not configured")
    try:
        scheme, token = authorization.split(" ", 1)
    except ValueError:
        raise HTTPException(status_code=401, detail="Invalid authorization header format")
    # compare_digest gives a constant-time comparison, avoiding timing attacks.
    if scheme.lower() != "bearer" or not secrets.compare_digest(token, AUTH_TOKEN):
        raise HTTPException(status_code=401, detail="Unauthorized")

@app.post("/v1/rerank", response_model=RerankResponse, dependencies=[Depends(verify_token)])
async def rerank(request: RerankRequest):
    """Score request.documents against request.query and return the top_n
    most relevant ones in descending score order.

    Raises:
        HTTPException 400: no model name in the request or the .env default.
        HTTPException 404: propagated from get_model for a missing model.
        HTTPException 500: any unexpected scoring failure.
    """
    # Unique request id.
    req_id = str(uuid.uuid4())

    # Prefer the model named in the request, else the .env default.
    model_name = request.model or DEFAULT_MODEL
    if not model_name:
        raise HTTPException(status_code=400, detail="模型名称未配置，请在 .env 或请求中指定 MODEL_NAME")

    # Nothing to rank: return an empty result set instead of scoring.
    if not request.documents:
        return RerankResponse(
            id=req_id,
            results=[],
            tokens=TokenUsage(input_tokens=0, output_tokens=0),
        )

    try:
        # Load (or fetch from cache) the model.
        model = await get_model(model_name)

        # model.score() is synchronous and compute-heavy; run it in the
        # default executor so the event loop stays responsive.
        loop = asyncio.get_running_loop()
        outputs = await loop.run_in_executor(
            None,
            lambda: model.score(request.query, request.documents),
        )

        # Sort by score descending and keep the top_n entries.
        ranked = sorted(
            enumerate(outputs),
            key=lambda item: item[1].outputs.score,
            reverse=True,
        )[:request.top_n]

        results = [
            ResultItem(
                document=DocumentItem(text=request.documents[idx]) if request.return_documents else None,
                index=idx,
                relevance_score=out.outputs.score,
            )
            for idx, out in ranked
        ]

        # TODO: populate from vLLM token statistics once they are exposed.
        tokens = TokenUsage(input_tokens=0, output_tokens=0)
        return RerankResponse(
            id=req_id,
            results=results,
            tokens=tokens,
        )
    except HTTPException:
        # Preserve intentional HTTP errors (e.g. 404 from get_model) instead
        # of collapsing them into a generic 500 as the old handler did.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e


# Script entry point: serve the API with auto-reload for development.
if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=8800, reload=True)

# Start command (port matches uvicorn.run above):
# uvicorn main:app --host 0.0.0.0 --port 8800
# Example .env file:
# MODEL_NAME=BAAI/bge-reranker-v2-m3
# AUTH_TOKEN=your_secure_token_here
