from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from sentence_transformers import SentenceTransformer
import torch
import os
import logging

# Configure logging: timestamped INFO-level records written to a stream handler.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[
        logging.StreamHandler()
    ]
)

# Module-level logger named after this module (standard logging convention).
logger = logging.getLogger(__name__)

# Force Hugging Face tooling into offline mode so the model below is loaded
# strictly from the local path and never fetched from the Hub.
# NOTE(review): these are set *after* the sentence_transformers import above;
# this works only if the library reads them lazily at load time — confirm
# when upgrading library versions.
os.environ['TRANSFORMERS_OFFLINE'] = '1'
os.environ['HF_HUB_OFFLINE'] = '1'

app = FastAPI()

# Load the local model (swap in your own path if needed).
MODEL_PATH = "./models/huggingface/xlm-roberta-large"
model = SentenceTransformer(MODEL_PATH)

logger.info(f"Model loaded from {MODEL_PATH}")


class EmbeddingRequest(BaseModel):
    """Request body for POST /embeddings, mirroring the OpenAI embeddings API shape."""

    # Texts to embed; one embedding vector is returned per entry.
    input: list[str]
    # Requested model name; the handler only serves "xlm-roberta-large".
    model: str = "xlm-roberta-large"
    # Accepted for OpenAI API compatibility; not read anywhere in this file.
    encoding_format: str = "float"
    # When True, passed through as normalize_embeddings to model.encode.
    normalize: bool = False


@app.post("/embeddings")
def get_embeddings(request: EmbeddingRequest):
    """Encode each string in ``request.input`` and return an OpenAI-style response.

    Args:
        request: Validated embedding request (texts, model name, options).

    Returns:
        dict: OpenAI-compatible payload with one embedding per input string
        and a whitespace-based token-count approximation in ``usage``.

    Raises:
        HTTPException: 400 if the requested model is unsupported,
            500 if encoding fails for any reason.
    """
    # Lazy %-style logging: request.input may be large, so avoid eager
    # f-string formatting when the level is disabled.
    logger.info("Received embedding request: %s", request)

    if request.model not in ("xlm-roberta-large",):
        logger.warning("Unsupported model requested: %s", request.model)
        raise HTTPException(status_code=400, detail="Model not supported")

    try:
        embeddings = model.encode(request.input, normalize_embeddings=request.normalize)
        # Whitespace split is only a rough token approximation (no real
        # tokenizer); compute it once instead of twice as before.
        token_count = sum(len(text.split()) for text in request.input)
        return {
            "object": "list",
            "data": [
                {
                    "object": "embedding",
                    "embedding": emb.tolist(),
                    "index": i,
                } for i, emb in enumerate(embeddings)
            ],
            "model": request.model,
            "usage": {
                "prompt_tokens": token_count,
                "total_tokens": token_count,
            }
        }
    except Exception as e:
        # logger.exception records the traceback automatically; chain the
        # original cause onto the HTTP error for easier debugging.
        logger.exception("Error processing request: %s", e)
        raise HTTPException(status_code=500, detail="Internal Server Error") from e


@app.get("/")
def health_check():
    """Liveness endpoint: report that the service is up and which model path it serves."""
    logger.info("Health check endpoint accessed")
    payload = {"status": "Embedding API is running", "model": MODEL_PATH}
    return payload
