# -*- coding: utf-8 -*-

import sys
import os
sys.path.append(os.getcwd())

import uvicorn
from fastapi import FastAPI,Body
from fastapi.responses import JSONResponse
from typing import Dict
import torch

# ASGI application instance; routes are registered below and served by uvicorn.
app = FastAPI()

from transformers import AutoTokenizer, AutoModel

# config = AutoConfig.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True)

# Local snapshot of THUDM/chatglm3-6b (the ChatGLM3 storage path is supplied
# directly). Can be overridden with the MODEL_DIR environment variable so the
# script is not tied to one machine's filesystem layout.
model_dir = os.environ.get(
    "MODEL_DIR",
    "/home/ubuntu/code/git/subject-word-extraction/chatglm3_model/chatglm3-6b/snapshots/67d005d386a01d4825649743f41e90f83edd6094/",
)
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
# fp16 weights on the GPU. trust_remote_code is required because ChatGLM3
# ships custom modeling code — NOTE: this executes repo-provided Python, so
# only point model_dir at a trusted snapshot.
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).half().cuda()
model = model.eval()

@app.post("/chat")
def f1(data: Dict):
    """Run one ChatGLM3 chat turn.

    Expects a JSON body like {"query": str, "history": list | ""}.
    An empty, missing, or null "history" starts a fresh conversation.
    Returns JSON {"response": str, "history": first two history entries}.
    """
    query = data["query"]
    # Clients may send "" (or omit/null the key) for a fresh conversation;
    # model.chat needs a list, so normalize any falsy value to []. The
    # original code only handled the ""-case and crashed on a missing key.
    history = data.get("history") or []

    response, history = model.chat(
        tokenizer, query, history=history, top_p=0.95, temperature=0.95
    )
    # Release cached GPU blocks between requests to limit memory growth.
    torch.cuda.empty_cache()

    # NOTE(review): only the first two history entries are returned —
    # presumably to cap payload size; confirm clients expect truncation.
    response = {"response": response, "history": history[:2]}
    return JSONResponse(content=response)


if __name__ == "__main__":
    # Bind to loopback only — this service is meant to sit behind a local
    # client/proxy, not be exposed directly.
    uvicorn.run(app=app, host="127.0.0.1", port=7866)