from transformers import AutoModel, AutoTokenizer
import gradio as gr
import torch


# UI copy shown in the Gradio interface header.
title = "🤖法小猿 AI Chatbot"
description = "法小猿提供法律咨询服务，立即进行对话"

# Clickable example prompts (one-element lists: the interface has one text input).
examples = [
    ["中国民法典规定自然人有哪些权利"],
    ["买卖人体细胞属于犯罪吗"],
    ["侵犯他人肖像权将受到什么惩罚"],
]

# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")

print("device>>>", device)


# Load the ChatGLM3-6B tokenizer and model from the Hugging Face Hub.
# trust_remote_code is required because ChatGLM ships custom model code.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm3-6b", trust_remote_code=True)

# Cast to fp16 only on CUDA: half precision halves GPU memory, but many CPU
# ops have no half-precision kernels, so an unconditional .half() would make
# the CPU fallback crash or crawl.
if device.type == "cuda":
    model = model.half()
model = model.to(device)

# Inference only: disable dropout and other training-mode behavior.
model.eval()


def predict(input, history=None):
    """Generate one chatbot turn and return the updated conversation.

    Args:
        input: The new user message. (Name shadows the builtin ``input``;
            kept unchanged for interface compatibility with existing callers.)
        history: Token-id history produced by the previous call (the second
            element of this function's return value, a list of token-id
            lists), or ``None``/empty on the first turn. The default is
            ``None`` rather than a mutable ``[]``.

    Returns:
        A ``(response_pairs, response_history)`` tuple: ``response_pairs`` is
        a list of ``(user, bot)`` string tuples for the ``gr.Chatbot``
        component, ``response_history`` is the raw token-id history to thread
        back through ``gr.State``.
    """
    # Rebuild the history tensor. The previous turn handed back plain Python
    # lists (via .tolist()), so convert them to a LongTensor; an empty history
    # must be shaped (1, 0) so torch.cat below sees matching ranks against the
    # 2-D (1, seq_len) encoding of the new message.
    if history:
        history_ids = torch.as_tensor(history, dtype=torch.long, device=device)
    else:
        history_ids = torch.empty((1, 0), dtype=torch.long, device=device)

    # Tokenize the new user message, terminated by EOS so turns stay separable.
    # NOTE(review): assumes tokenizer.eos_token is a non-None string for this
    # tokenizer — confirm; some chat tokenizers leave it unset.
    new_user_input_ids = tokenizer.encode(
        input + tokenizer.eos_token, return_tensors="pt"
    ).to(device)

    # Append the new user tokens to the running conversation.
    bot_input_ids = torch.cat([history_ids, new_user_input_ids], dim=-1)

    # Generate a response; no gradients needed for inference.
    with torch.no_grad():
        response_history = model.generate(
            bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
        ).tolist()

    # Decode WITHOUT skipping special tokens: the EOS markers are the turn
    # delimiters we split on next (skip_special_tokens=True would strip them
    # and leave a single unsplittable string, i.e. an always-empty chat).
    response_text = tokenizer.decode(response_history[0])
    response = response_text.split(tokenizer.eos_token)

    # Debug output.
    print("decoded_response-->>" + str(response_text))
    print("response-->>" + str(response))

    # Pair up alternating (user, bot) turns for the gr.Chatbot component.
    response_pairs = []
    for i in range(0, len(response) - 1, 2):
        if i + 1 < len(response):
            response_pairs.append((response[i].strip(), response[i + 1].strip()))

    return response_pairs, response_history


# Wire the prediction function into a Gradio interface and start the server.
# "state" input/output threads the token-id history between turns.
demo = gr.Interface(
    fn=predict,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    title=title,
    description=description,
    examples=examples,
    theme="finlaymacklon/boxy_violet",
)
demo.launch()
