from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import uvicorn
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import logging
from typing import Dict

# Logging setup: INFO-level, module-scoped logger per stdlib convention.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="Embedding Similarity API", version="1.0.0")


# Request model
class EmbeddingRequest(BaseModel):
    """Request body for the similarity endpoint: the two sentences to compare."""
    sentence1: str
    sentence2: str


# Response model
class EmbeddingResponse(BaseModel):
    """Declared response schema for the similarity endpoint.

    NOTE(review): currently unused — the POST handler is declared with
    ``response_model=Dict`` and returns ``{"data": {"score": ...},
    "response_code": ...}``, which does not match these fields. Confirm
    whether this model should be wired in or removed.
    """
    score: float
    response_code: int


# Module-level holder for the embedding model; populated by load_model()
# from the FastAPI startup hook. None means "not yet loaded".
model = None


def load_model():
    """Load the sentence-transformer embedding model into the global ``model``.

    Called once from the FastAPI startup hook. Any failure is logged and
    re-raised so the application fails fast instead of serving 500s with an
    unloaded model.

    Raises:
        Exception: whatever SentenceTransformer raises on download/init failure.
    """
    global model
    try:
        # Other checkpoints you could swap in:
        # - 'paraphrase-MiniLM-L6-v2' (lightweight, English)
        # - 'distiluse-base-multilingual-cased' (multilingual)
        # - 'paraphrase-multilingual-MiniLM-L12-v2' (multilingual, stronger)
        #
        # Fix: BGE checkpoints are published under the 'BAAI' organization on
        # the Hugging Face Hub. A bare 'bge-base-en-v1.5' gets resolved
        # against the 'sentence-transformers/' namespace and fails to
        # download, so the fully-qualified repo id is required.
        model_name = 'BAAI/bge-base-en-v1.5'

        logger.info(f"正在加载模型: {model_name}")
        model = SentenceTransformer(model_name)
        logger.info("模型加载完成!")

    except Exception as e:
        logger.error(f"模型加载失败: {str(e)}")
        # Bare raise preserves the original traceback; `raise e` would reset it.
        raise


@app.on_event("startup")
async def startup_event():
    """FastAPI startup hook: load the embedding model before serving traffic."""
    load_model()


@app.get("/")
async def root():
    """Liveness probe: report that the service process is up."""
    payload = {
        "message": "Embedding Similarity API is running",
        "status": "healthy",
    }
    return payload


@app.post("/", response_model=Dict)
async def calculate_similarity(request: EmbeddingRequest):
    """Compute the cosine similarity between the embeddings of two sentences.

    Args:
        request: Body carrying ``sentence1`` and ``sentence2``.

    Returns:
        Dict: ``{"data": {"score": <float>}, "response_code": 200}`` on
        success, or ``{"error": <message>, "response_code": 400/500}`` on
        failure. NOTE: errors are signalled in the JSON body; the HTTP status
        itself is always 200.
    """
    try:
        # Guard: the model is loaded by the startup hook; refuse to serve
        # until it is in place.
        if model is None:
            logger.error("模型未加载")
            return {"error": "Model not loaded", "response_code": 500}

        # Guard: both inputs must contain non-whitespace text.
        has_blank = not request.sentence1.strip() or not request.sentence2.strip()
        if has_blank:
            return {"error": "Both sentences must be non-empty", "response_code": 400}

        logger.info(f"计算相似度: '{request.sentence1}' vs '{request.sentence2}'")

        # Embed both sentences in a single batched call; unpack the two rows.
        vec_a, vec_b = model.encode([request.sentence1, request.sentence2])

        # Cosine similarity of the pair — a 1x1 matrix whose sole entry is
        # the score; cast to a plain float for JSON serialization.
        score = float(cosine_similarity([vec_a], [vec_b])[0][0])

        logger.info(f"相似度分数: {score}")

        return {"data": {"score": score}, "response_code": 200}

    except Exception as e:
        # Last-resort handler: surface the error in-band rather than a 500 page.
        logger.error(f"计算相似度时出错: {str(e)}")
        return {"error": str(e), "response_code": 500}


@app.get("/health")
async def health_check():
    """Detailed health check: process liveness plus model load state."""
    model_ready = model is not None
    return {
        "status": "healthy",
        "model_loaded": model_ready,
        "response_code": 200,
    }


if __name__ == "__main__":
    # Start the API server; this call blocks until shutdown.
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8001,
        log_level="info"
    )

    # Usage notes (previously a dead string literal evaluated only after the
    # server exited — kept here as comments instead):
    #
    # Install dependencies:
    #   pip install fastapi uvicorn sentence-transformers scikit-learn numpy
    #
    # Run the service:
    #   python this_file.py
    # or with the uvicorn CLI:
    #   uvicorn this_file:app --host 0.0.0.0 --port 8001 --reload
    #
    # Test the API:
    #   curl -X POST "http://localhost:8001/" \
    #        -H "Content-Type: application/json" \
    #        -d '{"sentence1": "你好世界", "sentence2": "Hello world"}'
    #
    # Example Dockerfile:
    #   FROM python:3.9-slim
    #   WORKDIR /app
    #   COPY requirements.txt .
    #   RUN pip install -r requirements.txt
    #   COPY . .
    #   EXPOSE 8001
    #   CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8001"]