import os
import torch
from typing import List, Dict, Any, Optional
from pydantic import BaseModel, Field
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
import uvicorn

from FlagEmbedding import FlagAutoReranker

# Request/response models for the API.
class QueryPassagePair(BaseModel):
    """One (query, passage) pair to be scored by the reranker."""
    query: str = Field(..., description="用户查询内容")
    passage: str = Field(..., description="需要评估的文档段落")

class RerankerRequest(BaseModel):
    """Body of POST /rerank: the pairs to score plus a normalization flag."""
    pairs: List[QueryPassagePair] = Field(..., description="查询-段落对列表")
    normalize: bool = Field(True, description="是否归一化分数到0-1范围")

class ScoreResult(BaseModel):
    """Score for a single (query, passage) pair as returned by /rerank."""
    query: str
    passage: str
    # Raw reranker score (unbounded model output).
    score: float
    # Score mapped into [0, 1]; only present when the request asked for
    # normalization (presumably a sigmoid of the raw score — confirm in
    # the FlagEmbedding docs for compute_score(normalize=True)).
    normalized_score: Optional[float] = None
    # Coarse label derived from the normalized score: "高" / "低" / "未知".
    relevance: str

class RerankerResponse(BaseModel):
    """Envelope for /rerank results: per-pair scores plus a status message."""
    scores: List[ScoreResult]
    success: bool = True
    message: str = "评分成功"

# Create the FastAPI application.
app = FastAPI(
    title="BGE Reranker API",
    description="用于文档重排序的BGE Reranker API",
    version="1.0.0",
)

# Allow cross-origin requests from any origin.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# overly permissive for production — consider restricting origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Module-level handle to the loaded reranker; None until init_model() runs.
reranker_model = None

# Initialize the reranker model.
def init_model():
    """Load the BGE reranker into the module-level ``reranker_model``.

    Uses CUDA with fp16 when available, otherwise CPU with fp32.  The model
    name can be overridden via the ``RERANKER_MODEL`` environment variable
    (default: ``BAAI/bge-reranker-large``) so deployments can swap models
    without a code change.  Propagates any load failure to the caller.
    """
    global reranker_model

    # Pick the device; fp16 only makes sense on GPU.
    if torch.cuda.is_available():
        device = ["cuda"]
        # Fixed garbled log text: was "使用CUDA加速(NNVIDIA_SMI)".
        print("使用CUDA加速(NVIDIA)")
        use_fp16 = True
    else:
        device = ["cpu"]
        print("使用CPU")
        use_fp16 = False

    # Model name is configurable; default preserves the original behavior.
    model_name = os.getenv("RERANKER_MODEL", "BAAI/bge-reranker-large")

    # Load the model (downloads to HF_HUB_CACHE on first use).
    reranker_model = FlagAutoReranker.from_finetuned(
        model_name,
        use_fp16=use_fp16,
        batch_size=8,
        query_max_length=256,
        max_length=512,
        devices=device,
        cache_dir=os.getenv('HF_HUB_CACHE', None),
    )
    print("BGE Reranker模型加载完成")

# Load the model once at application startup so the first request is fast.
# NOTE(review): @app.on_event is deprecated in recent FastAPI versions in
# favor of lifespan handlers — confirm the pinned FastAPI version before
# migrating.
@app.on_event("startup")
async def startup_event():
    """Eagerly initialize the reranker model at server startup."""
    init_model()

# Health-check endpoint.
@app.get("/health")
async def health_check():
    """Report service liveness and whether the reranker model is loaded."""
    model_ready = reranker_model is not None
    return {"status": "healthy", "model_loaded": model_ready}

def _as_score_list(scores):
    """Coerce a ``compute_score`` result to a list.

    FlagEmbedding's compute_score returns a bare float for a single pair,
    which would break zip()/indexing below — TODO confirm against the
    installed FlagEmbedding version.
    """
    return scores if isinstance(scores, list) else [scores]

# Rerank endpoint: score each (query, passage) pair.
@app.post("/rerank", response_model=RerankerResponse)
async def rerank(request: RerankerRequest):
    """Score each query-passage pair with the BGE reranker.

    Returns the raw score per pair and, when ``request.normalize`` is true,
    a score normalized into [0, 1] plus a coarse "高"/"低" relevance label
    (threshold 0.5); without normalization the label is "未知".

    Raises:
        HTTPException 400: empty pair list.
        HTTPException 500: model load or scoring failure.
    """
    global reranker_model

    # Lazily load the model if the startup hook did not run (or failed).
    if reranker_model is None:
        try:
            init_model()
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"模型加载失败: {str(e)}")

    # Convert pydantic pairs to the [[query, passage], ...] shape the model expects.
    pairs = [[pair.query, pair.passage] for pair in request.pairs]

    # Validate BEFORE the broad try below: previously this 400 was raised
    # inside the try, caught by `except Exception`, and re-raised as a 500.
    if not pairs:
        raise HTTPException(status_code=400, detail="未提供查询-段落对")

    try:
        # Raw (unbounded) relevance scores.
        original_scores = _as_score_list(reranker_model.compute_score(pairs))

        # Normalized scores in [0, 1], only when requested.
        normalized_scores = None
        if request.normalize:
            normalized_scores = _as_score_list(
                reranker_model.compute_score(pairs, normalize=True)
            )

        # Build one ScoreResult per input pair, preserving input order.
        results = []
        for i, (pair, original_score) in enumerate(zip(request.pairs, original_scores)):
            result = {
                "query": pair.query,
                "passage": pair.passage,
                "score": float(original_score),
            }

            if normalized_scores:
                norm_score = float(normalized_scores[i])
                result["normalized_score"] = norm_score
                result["relevance"] = "高" if norm_score > 0.5 else "低"
            else:
                result["relevance"] = "未知"

            results.append(ScoreResult(**result))

        return RerankerResponse(scores=results)

    except HTTPException:
        # Preserve deliberate HTTP errors instead of converting them to 500s.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"计算分数失败: {str(e)}")

# Batch rerank endpoint: rank every passage against every query.
@app.post("/batch_rerank")
async def batch_rerank(request: Dict[str, Any]):
    """Rank each passage against each query; return the top-k per query.

    Expected JSON body:
        queries:   list[str]   — queries to rank against
        passages:  list[str]   — candidate passages
        normalize: bool        — normalize scores to [0, 1] (default True)
        top_k:     int         — results kept per query (default: all)

    Raises:
        HTTPException 400: empty queries or passages.
        HTTPException 500: model load or scoring failure.
    """
    global reranker_model

    # Lazily load the model, matching /rerank. Previously this endpoint
    # crashed on `None.compute_score` if the startup hook had not run.
    if reranker_model is None:
        try:
            init_model()
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"模型加载失败: {str(e)}")

    queries = request.get("queries", [])
    passages = request.get("passages", [])
    normalize = request.get("normalize", True)
    top_k = request.get("top_k", len(passages))

    # Validate BEFORE the broad try below: previously this 400 was raised
    # inside the try, caught by `except Exception`, and re-raised as a 500.
    if not queries or not passages:
        raise HTTPException(status_code=400, detail="查询或段落不能为空")

    try:
        results = {}

        for query_id, query in enumerate(queries):
            # Pair the current query with every candidate passage.
            pairs = [[query, passage] for passage in passages]

            # Single call instead of an if/else duplicating compute_score.
            scores = reranker_model.compute_score(pairs, normalize=normalize)
            # compute_score returns a bare float for a single pair — TODO
            # confirm against the installed FlagEmbedding version.
            if not isinstance(scores, list):
                scores = [scores]

            # Sort passages by score, descending, then keep the top-k.
            passage_scores = sorted(
                zip(passages, scores), key=lambda x: x[1], reverse=True
            )
            top_results = passage_scores[:top_k]

            results[f"query_{query_id}"] = {
                "query": query,
                "results": [
                    {
                        "passage": p,
                        "score": float(s),
                        # "未知" for un-normalized scores, consistent with
                        # /rerank (previously everything was labeled "低").
                        "relevance": (
                            ("高" if float(s) > 0.5 else "低")
                            if normalize
                            else "未知"
                        ),
                    }
                    for p, s in top_results
                ],
            }

        return {
            "success": True,
            "results": results
        }

    except HTTPException:
        # Preserve deliberate HTTP errors instead of converting them to 500s.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"批量排序失败: {str(e)}")

# Script entry point.
if __name__ == "__main__":
    # Host and port are configurable through the environment.
    port = int(os.environ.get("PORT", 8002))
    host = os.environ.get("HOST", "0.0.0.0")
    # Auto-reload is useful in development but should be off in production;
    # the default stays on for backward compatibility.
    reload_enabled = os.environ.get("RELOAD", "true").lower() in ("1", "true", "yes")

    print(f"启动BGE Reranker API服务在 http://{host}:{port}")

    # NOTE(review): the import string assumes this file is named
    # reranker_api.py — confirm the actual module name, otherwise reload
    # mode cannot re-import the app.
    uvicorn.run("reranker_api:app", host=host, port=port, reload=reload_enabled)
