from fastapi import FastAPI, Query
import time
import uvicorn
from fastapi.middleware.cors import CORSMiddleware
from transformers import AutoModelForCausalLM, AutoTokenizer


class MyChat:
    """Stand-in chatbot that mimics model loading and inference latency.

    Useful for developing the HTTP layer without a GPU: it "loads" nothing
    and answers every question with a fixed canned string after a
    configurable delay.
    """

    def __init__(self, delay=5):
        """Pretend to load the language model.

        Args:
            delay: seconds to sleep in ``chat_from_message`` to simulate
                inference latency. Defaults to 5, matching the previously
                hard-coded value; pass 0 to make tests fast.
        """
        print("正在导入语言大模型")
        self.chat_model = None  # no real model is held by this mock
        self.delay = delay
        print("导入成功")

    def chat_from_message(self, question=None):
        """Simulate inference: wait ``self.delay`` seconds, then return a fixed reply.

        Args:
            question: ignored by the mock; accepted for interface parity
                with ``MyXunziChat.chat_from_message``.

        Returns:
            The canned answer string.
        """
        time.sleep(self.delay)  # simulate model inference time
        return "这里是回答"


class MyXunziChat:
    """Chatbot backed by the local Xunzi-Qwen-Chat model via HuggingFace transformers."""

    # Default location of the local model checkpoint (previously duplicated
    # as a literal in both from_pretrained calls).
    DEFAULT_MODEL_PATH = r"G:\llm\Xunzillm4cc\Xunzi-Qwen-Chat"

    def __init__(self, device=1, model_path=None):
        """Load the tokenizer and model onto a single CUDA device.

        Args:
            device: CUDA device index used to build the ``device_map``
                string (default 1, as before).
            model_path: checkpoint directory to load from; defaults to
                ``DEFAULT_MODEL_PATH`` so existing callers are unaffected.
        """
        if model_path is None:
            model_path = self.DEFAULT_MODEL_PATH
        device_map = "cuda:{}".format(device)  # build once, use for both loads
        print("正在导入语言大模型")
        self.tokenizer = AutoTokenizer.from_pretrained(model_path,
                                                       device_map=device_map,
                                                       trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(model_path,
                                                          device_map=device_map,
                                                          trust_remote_code=True).eval()
        print("导入成功")

    def chat_from_message(self, question=None):
        """Run one stateless chat turn against the model.

        Args:
            question: the user's question; coerced with ``str`` so ``None``
                or non-string input does not crash the tokenizer.

        Returns:
            The model's reply text. Conversation history is discarded on
            every call (``history=None``), so each turn is independent.
        """
        response, _history = self.model.chat(self.tokenizer, str(question), history=None)
        return response


app = FastAPI()

# Load the real LLM once at startup; the instance is shared by every endpoint.
chatbot = MyXunziChat()

# Permit cross-origin requests from any host (development-friendly CORS).
origins = ["*"]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.post("/chat")
async def chat_with_bot(question=Query(None)):
    """Handle POST /chat: forward the client's question to the chatbot.

    Previously this endpoint ignored all client input and called the model
    with no question; it now accepts an optional ``question`` parameter
    (mirroring the GET endpoint) while remaining backward-compatible —
    a bare POST still works and passes ``None`` through, exactly as the
    default did before.

    Returns:
        JSON object ``{"response": <model reply>}``.
    """
    response = chatbot.chat_from_message(question=question)
    return {"response": response}


@app.get('/chat')
async def chat_get(question=Query(None)):
    """Handle GET /chat: answer ``question`` via the shared chat model.

    Returns:
        JSON object ``{"response": <model reply>}``.
    """
    print(question)  # trace the incoming question on the server console
    return {"response": chatbot.chat_from_message(question=question)}


if __name__ == "__main__":
    # Serve on localhost only; port 8001 keeps clear of the common 8000 default.
    uvicorn.run(app=app, host="127.0.0.1", port=8001)
