
import gradio as gr
import random
import time
def random_response(message, history):
    """Toy chat handler that ignores its input and answers at random.

    Args:
        message: Latest user message (unused).
        history: Prior conversation turns (unused).

    Returns:
        Either "Yes" or "No", chosen uniformly at random.
    """
    answers = ["Yes", "No"]
    return random.choice(answers)


from modelscope import snapshot_download
from transformers import AutoModelForCausalLM, AutoTokenizer

# Download the model checkpoint from ModelScope into ./check and get its local path.
model_dir = snapshot_download('qwen/Qwen-7B-Chat',cache_dir='./check')
# Alternative checkpoints (swap in one of these lines to use a different model):
# model_dir = snapshot_download('qwen/Qwen-7B-Chat')
# model_dir = snapshot_download('qwen/Qwen-14B')
# model_dir = snapshot_download('qwen/Qwen-14B-Chat')

# Load tokenizer and model from the local checkpoint directory.
# trust_remote_code=True is required because Qwen ships custom modeling code
# inside the checkpoint dir rather than in the transformers library itself.
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_dir,
    device_map="auto",  # let accelerate place layers on available devices
    trust_remote_code=True
).eval()  # inference only: disable dropout etc.

# Log which device the model ended up on (useful when device_map="auto").
print(model.device)
# Few-shot (question, answer) example pairs for drone spraying parameters
# (flight altitude / obstacle avoidance / spray width formatting).
# NOTE(review): "promute" looks like a misspelling of "prompt", and this list
# is never referenced anywhere in this file — presumably intended to be fed
# to the model as in-context examples; confirm before removing or wiring up.
promute = [('飞行高度10m, 打开避障，喷幅2m，以这种格式输出信息：飞行高度：\n 是否打开避障：\n，喷幅：\n',
           '飞行高度：10m\n 是否打开避障：是 \n 喷幅：2m \n'),
           ('飞行高度2m, 关闭避障，以这种格式输出信息：飞行高度：\n是否打开避障：\n，喷幅：\n',
           '飞行高度：2m\n 是否打开避障：否 \n 喷幅：默认 \n'),
           ('飞机高度 4m, 要能够自动避障，喷幅5m，以这种格式输出信息：飞行高度：\n 是否打开避障：\n，喷幅：\n',
            '飞行高度：4m\n 是否打开避障：是 \n 喷幅：5m \n')
           ]

def get_response(message, history=None):
    """Send one user message to the Qwen chat model and return its reply.

    Args:
        message: The user's message text.
        history: Prior (query, response) pairs from earlier turns, or
            None / an empty list to start a fresh conversation.

    Returns:
        A ``(response, history)`` tuple as produced by ``model.chat``:
        the model's reply text and the updated conversation history.
    """
    # The original code duplicated the model.chat call in two branches that
    # differed only in passing None vs. a (possibly empty) history list.
    # Normalize an empty/missing history to None once and call a single time.
    if not history:
        history = None
    response, history = model.chat(tokenizer, message, history=history)
    return response, history


# Gradio UI: a chat pane, a text input, and a button that clears both.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, chat_history):
        """Handle one submit: query the model, record the turn, clear the box."""
        reply, _updated_history = get_response(message, chat_history)
        chat_history.append((message, reply))
        # First output blanks the textbox; second refreshes the chat pane.
        return "", chat_history

    # Pressing Enter in the textbox sends (msg, chatbot) through respond
    # and writes the two return values back into (msg, chatbot).
    msg.submit(respond, [msg, chatbot], [msg, chatbot])

# Bind to all interfaces so the app is reachable from other machines.
demo.launch(server_name='0.0.0.0')