import sys
import uvicorn
import gc
import json
import torch
import random
import string

from fastapi import FastAPI, HTTPException, Response
from fastapi.middleware.cors import CORSMiddleware
from contextlib import asynccontextmanager
from typing import List, Literal, Optional, Union
from FlagEmbedding import FlagReranker


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan hook: no startup work; frees CUDA caches on shutdown."""
    # Startup phase: nothing to initialize here.
    yield
    # Shutdown phase: release cached GPU memory, but only when a CUDA
    # device is actually available.
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()

# Application instance; the lifespan hook above handles GPU cleanup on shutdown.
app = FastAPI(lifespan=lifespan)

# Allow cross-origin requests from any origin, with any method/header and
# with credentials. NOTE(review): allow_origins=["*"] combined with
# allow_credentials=True is maximally permissive — confirm this is
# acceptable for the deployment environment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Global reranker handle; assigned in the __main__ block below when the
# script is executed directly, otherwise remains None.
reranker = None

@app.post("/rerank")
async def rerank(request: dict):
    """Score candidate documents against a query with the reranker model.

    Expects a JSON body with "query" (str) and "documents" (list of str).
    Returns {"results": [...]} where each item carries the document's
    original index, its text, and a normalized relevance score.

    Raises:
        HTTPException: 503 if the reranker model has not been loaded,
            400 if "query" or "documents" is missing or empty.
    """
    if reranker is None:
        # The model is only loaded in the __main__ entry point; guard
        # against the app being served without it (e.g. `uvicorn module:app`)
        # instead of failing with an opaque AttributeError / 500.
        raise HTTPException(status_code=503, detail="Reranker model is not loaded")

    query = request.get("query")
    documents = request.get("documents")
    if not query or not documents:
        # Previously this silently returned an empty result list; an
        # explicit 400 tells the caller the request body is malformed.
        raise HTTPException(status_code=400,
                            detail="Both 'query' and 'documents' are required")

    # Score every (query, document) pair in a single batched call rather
    # than invoking the model once per document — one forward pass over
    # the whole batch is significantly faster, especially on GPU.
    scores = reranker.compute_score([[query, doc] for doc in documents],
                                    normalize=True)

    results = [
        {
            "index": index,
            "document": {"text": document},
            "relevance_score": score,
        }
        for index, (document, score) in enumerate(zip(documents, scores))
    ]
    return {"results": results}

if __name__ == "__main__":
    # NOTE(review): the model is only loaded when this file is executed
    # directly; serving via `uvicorn module:app` would leave `reranker`
    # as None. Consider moving the load into the lifespan hook — confirm.
    reranker = FlagReranker('AI-ModelScope/bge-reranker-v2-m3', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
    # NOTE(review): workers=1 — presumably because uvicorn cannot fork
    # multiple workers from an in-memory app object; verify before raising.
    uvicorn.run(app, host='0.0.0.0', port=8000, workers=1)

