from transformers import AutoModelForCausalLM, AutoTokenizer



from fastapi import FastAPI
import re
app = FastAPI()
from pydantic import BaseModel
from datetime import datetime
import uvicorn

# Device for the model. The previous value "auto:6" is not a valid
# `device_map` string for transformers' from_pretrained (valid values are
# e.g. "auto", "balanced", or a concrete device such as "cuda:6"); the
# concrete GPU index 6 appears to be the intent — TODO confirm GPU index.
device = "cuda:6"  # the device to load the model onto

# Load the Qwen2-7B causal LM and its tokenizer from a local checkpoint.
# torch_dtype="auto" lets transformers pick the checkpoint's native dtype.
model = AutoModelForCausalLM.from_pretrained(
    "/home/zhengzhenzhuang/models/qwen/Qwen2-7B",
    torch_dtype="auto",
    device_map=device,
)

tokenizer = AutoTokenizer.from_pretrained("/home/zhengzhenzhuang/models/qwen/Qwen2-7B")


def qa(question):
    """Generate an answer to *question* with the chat model.

    Builds a single-turn conversation (system + user), applies the
    tokenizer's chat template, generates up to 512 new tokens, and decodes
    only the newly generated portion.

    Args:
        question: The user's question as a plain string.

    Returns:
        The model's decoded response with special tokens stripped.
    """
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": question},
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    # Move the inputs to the device the model actually resides on.
    # BUG FIX: was `.to(device)`, where `device` is the device_map string
    # passed to from_pretrained — `.to()` needs a concrete torch device,
    # and `model.device` is correct even for auto/sharded placement.
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    generated_ids = model.generate(
        model_inputs.input_ids,
        max_new_tokens=512,
    )
    # Drop the prompt tokens so only the completion is decoded.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response


class MyClassModel(BaseModel):
    """Request body schema for the POST /qwen2/7B/api endpoint."""

    # The user's question to be answered by the model.
    question: str


@app.get("/qwen2/7B/test")
def read_root():
    """Health-check endpoint: always returns a fixed greeting payload."""
    payload = {"Hello": "World"}
    return payload


@app.post("/qwen2/7B/api")
async def create_myclass(myclass: MyClassModel):
    """Answer a question with the Qwen2 model.

    Logs the question, the raw answer, and the elapsed time, then returns
    the answer truncated at the first "\\n" that is immediately followed
    by an ASCII letter (a heuristic cut-off for trailing model chatter).

    Args:
        myclass: Request body carrying the user's question.

    Returns:
        The (possibly truncated) answer string.
    """
    t1 = datetime.now()

    question = myclass.question
    print(question)
    answer = qa(question)
    print(answer)

    t2 = datetime.now()
    # BUG FIX: was (t1 - t2), which always produced a negative duration.
    second = (t2 - t1).total_seconds()

    print(f"耗时{second}")

    # Find the first "\n" immediately followed by an ASCII letter.
    match = re.search(r'\n[a-zA-Z]', answer)

    if match:
        # match.start() is the index of the "\n" itself; return everything
        # before it.
        return answer[:match.start()]
    return answer


if __name__ == "__main__":
    # Serve the API on all interfaces, port 8092.
    uvicorn.run(app, host="0.0.0.0", port=8092)
