from fastapi import FastAPI
app = FastAPI()
from pydantic import BaseModel
from datetime import datetime
import uvicorn

print("正在初始化，请耐心等候……")
from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline
import torch
# Imported for its side effect: registers the Ascend 'npu' device backend
# with PyTorch so `.to('npu:2')` below works.
import torch_npu

# Local checkpoint path of the chat model and the NPU device to host it on.
model_path='/home/linweibin/lwb2/model/v1/MindLLM-1b3-chat-zh-v2.0'
device='npu:2'
#device='auto'
print(f"{device}")


import redis
# Create a Redis connection pool; the endpoint below uses Redis as a
# cross-process lock so only one generation runs at a time.
# NOTE(review): host and password are hard-coded in source — move them to
# environment variables or a config file before deploying/sharing this code.
pool = redis.ConnectionPool(
    host='117.72.210.65',
    port=6379,
    db=0,
    password='zheng@123456',
    decode_responses=True,
    max_connections=10  # maximum number of pooled connections
)
r = redis.Redis(connection_pool=pool)


# Load tokenizer and model once at startup; all requests reuse these objects.
tokenizer = AutoTokenizer.from_pretrained(model_path)
tokenizer.max_length = 1024
model = AutoModelForCausalLM.from_pretrained(model_path).to(device)

generator = TextGenerationPipeline(model=model, tokenizer=tokenizer, device=device)


class MyClassModel(BaseModel):
    """Request body for POST /mind/api."""

    # The user's question, injected verbatim into the chat prompt template.
    question: str


@app.get("/")
def read_root():
    """Root endpoint; serves as a trivial liveness check."""
    greeting = {"Hello": "World"}
    return greeting



@app.post("/mind/api")
def create_myclass(myclass: MyClassModel):
    """Generate a chat completion for the posted question.

    A Redis key acts as a global mutex (SET NX with a 60 s TTL) so that only
    one generation runs at a time; concurrent callers receive a busy message
    instead of queueing on the single NPU model.

    Returns a dict: {"code": "200", "msg": "success", "data": {"output": ...}}.
    """
    question = myclass.question

    t1 = datetime.now()
    key = "mind8091"
    value = 'mind8091-123456'
    expire_time = 60  # lock TTL in seconds; auto-releases if this worker dies
    # SET NX succeeds only when no other request currently holds the lock.
    result = r.set(key, value, nx=True, ex=expire_time)
    if result:
        try:
            context = f"<user>\n{question}\n<assistant>\n"
            # NOTE(review): in HF transformers, repetition_penalty < 1.0
            # *encourages* repetition; penalizing values are > 1.0 (e.g. 1.2)
            # — confirm 0.5 is intentional.
            outputs = generator(context, max_new_tokens=1024, do_sample=True, num_beams=8, repetition_penalty=0.5,
                                no_repeat_ngram_size=5, return_full_text=False)
            output = outputs[0]['generated_text']
        finally:
            # Release the lock even if generation raises; previously an
            # exception left the key set, blocking the service for the
            # remainder of the 60 s TTL.
            r.delete(key)
    else:
        output = "服务繁忙，请稍后重试"

    t2 = datetime.now()
    second = (t2 - t1).total_seconds()
    print("问题：" + question)
    print("输出："+output)
    print(f"耗时{second}")

    return {
        "code": "200",
        "msg": "success",
        "data": {"output": f"{output}"}
    }



if __name__ == "__main__":
    # Development entry point: serve the app on all interfaces.
    bind_host, bind_port = "0.0.0.0", 8091
    uvicorn.run(app, host=bind_host, port=bind_port)