import time

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModel

# Load tokenizer and model once at startup (module-level, shared by all requests).
print('-------------------------------------------------------')
print('正在加载模型……')
model_name = "THUDM/chatglm2-6b-int4"
# model_name = "/root/.cache/huggingface/hub/models--THUDM--chatglm2-6b-int4/snapshots/66ecaf1db3a5085714e133357ea4824b69698743"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Fall back to CPU when no GPU is available, so the script still starts
# on CPU-only hosts instead of crashing on an unconditional .cuda().
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModel.from_pretrained(model_name, trust_remote_code=True).to(device)
# Inference only: disable dropout / training-mode behavior.
model = model.eval()
print('模型已经加载完毕。')


def chat_once(xinput):
    """Run one stateless chat turn through the model.

    Args:
        xinput: The user's prompt text.

    Returns:
        A ``(reply, '')`` tuple: the model's reply, plus an empty string
        that the UI wiring uses to clear the input textbox.
    """
    reply, _history = model.chat(tokenizer, xinput, history=[])
    return reply, ''


# Build the Gradio UI.
with gr.Blocks(analytics_enabled=False) as demo:

    # Text box where the user types the prompt.
    cmp_input = gr.Textbox(interactive=True, label='输入')

    # Button that submits the prompt to the model.
    cmp_input_button = gr.Button('发送到AutoDL')

    # Read-only text box that shows the model's reply.
    cmp_output = gr.Textbox(interactive=False, label='输出')

    # On click: run chat_once, write the reply to the output box,
    # and clear the input box (chat_once returns '' as its second value).
    cmp_input_button.click(
        chat_once,
        inputs=[cmp_input],
        outputs=[cmp_output, cmp_input],
        queue=False,
    )

# Enable the request queue (this simple demo does not really exercise it yet;
# it becomes relevant for later, longer-running examples).
demo.queue(concurrency_count=10, status_update_rate='auto')
# demo.queue(concurrency_count=10, status_update_rate=0.02)

# Start the Gradio server, listening on all interfaces at port 6006.
# demo.launch(server_name='0.0.0.0', server_port=6006, share=True, debug=True)  # with share tunnel
demo.launch(server_name='0.0.0.0', server_port=6006, debug=True)
