from modelscope import AutoModelForCausalLM, AutoTokenizer
from flask import Flask, request, Response, redirect
import threading, datetime
from flask_cors import CORS, cross_origin


# Path to a fine-tuned, merged Qwen2.5-0.5B-Instruct checkpoint produced by
# a local training run (swift-style output directory layout).
model_name = "output/qwen2_5-0_5b-instruct/v4-20241108-120727/checkpoint-420-merged"
# Loaded once at import time; device/dtype are chosen automatically
# ("auto" lets accelerate place the model on available GPU/CPU).
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype="auto", device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)


# Serve static assets from ./static at the site root.
app = Flask(__name__, static_folder="static", static_url_path="")


def llm_reply(messages: list):
    """Run one chat-completion pass over *messages* and return the decoded reply.

    *messages* is a list of ``{"role": ..., "content": ...}`` dicts in the
    usual chat format expected by the tokenizer's chat template.
    """
    prompt_text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer([prompt_text], return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=512)
    # Drop the echoed prompt tokens so only the newly generated tail remains.
    trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, outputs)
    ]
    return tokenizer.batch_decode(trimmed, skip_special_tokens=True)[0]


@app.post("/chatglm")
def chatglm():
    """Chat endpoint: rebuild the conversation from `history`, append the new
    `prompt`, and return the model reply plus the updated history.

    Expected JSON body:
        prompt:  str  — the new user message
        history: list — past turns; each turn is a sequence of alternating
                        user/assistant messages (even index = user, odd =
                        assistant)
    """
    payload = request.get_json()
    prompt = payload.get("prompt")
    # Tolerate a missing/null history (first request of a conversation).
    history: list = payload.get("history") or []
    print("json_post_list", payload)

    # Rebuild the message list from past turns.
    messages = []
    for turn in history:
        for index, value in enumerate(turn):
            role = "user" if index % 2 == 0 else "assistant"
            messages.append({"role": role, "content": value})

    # BUG FIX: the prompt used to be appended inside the history loop, so it
    # was duplicated once per turn — and never appended at all when history
    # was empty, meaning the first message of a conversation got no reply.
    if prompt is not None:
        messages.append({"role": "user", "content": prompt})

    response = ""
    if messages:
        response = llm_reply(messages)
        # BUG FIX: keep history in the same pair format the parse loop above
        # expects; appending a bare string would be enumerated char-by-char
        # on the next request.
        history.append([prompt, response])

    now = datetime.datetime.now()
    timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
    return {"response": response, "history": history, "status": 200, "time": timestamp}


# Bound concurrent model generations on /chat.
# NOTE(review): the original comment claimed a concurrency of 2, but the
# semaphore actually allows 10 — the code value (10) is kept as-is; confirm
# which limit was intended.
semaphore = threading.Semaphore(10)


@app.route("/chat", methods=["POST"])
@cross_origin()
def chat():
    """Chat endpoint: the JSON body is a ready-made messages list; returns
    the reply wrapped in a single-element list (serialized as JSON)."""
    # Limit how many requests run generation on the model at once.
    with semaphore:
        messages = request.get_json()
        print(messages)

        reply = llm_reply(messages)
        print([reply])
        return [reply]


if __name__ == "__main__":
    # Flask development server, listening on all interfaces.
    app.run(host="0.0.0.0", port=5000)
