from fastapi import FastAPI
from pydantic import BaseModel
from transformers import BertTokenizer, BertModel
import torch

app = FastAPI()

# Load the model and tokenizer once at startup so every request reuses them.
# NOTE(review): path is a machine-specific local directory — confirm it should
# not be configurable (env var / CLI arg) before deploying.
model_name = "C:/Users/86182/Desktop/sqsx_lqbz/model_download"  # local path to the BERT model files
tokenizer = BertTokenizer.from_pretrained(model_name)
model = BertModel.from_pretrained(model_name)


# 定义输入数据格式
class Query(BaseModel):
    """Request body for POST /predict/: a single sentence to embed."""

    # The raw input sentence to be tokenized and encoded by BERT.
    sentence: str


@app.post("/predict/")
def predict(query: Query):
    """Return the BERT sentence embedding for the submitted sentence.

    The embedding is the final hidden state of the [CLS] token, the
    conventional single-vector sentence representation for BERT.

    Args:
        query: Request body containing the input ``sentence``.

    Returns:
        dict with key ``"sentence_embedding"`` mapping to a flat list of
        floats (the hidden-size-dimensional [CLS] vector).
    """
    inputs = tokenizer(
        query.sentence,
        return_tensors="pt",
        # Without truncation, sentences longer than the model's maximum
        # position embeddings (512 for BERT) crash inside the model.
        truncation=True,
    )
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        outputs = model(**inputs)

    # [CLS] token representation: batch position 0, token position 0.
    sentence_embedding = outputs.last_hidden_state[:, 0, :].squeeze().tolist()

    return {"sentence_embedding": sentence_embedding}


def _demo_client():
    """Example client call — run manually in a separate process while the
    server is up. Kept as a function so importing this module does NOT fire
    an HTTP request (the original module-level code ran at import time,
    before any server existed)."""
    import requests

    url = "http://localhost:8000/predict/"
    data = {"sentence": "Hello, how are you?"}
    response = requests.post(url, json=data)
    print(response.json())


if __name__ == "__main__":
    import uvicorn

    # uvicorn.run blocks until shutdown; nothing after it would execute,
    # which is why the original's duplicated __main__ block was unreachable.
    uvicorn.run(app, host="0.0.0.0", port=8000)

