import gradio as gr
import time
from bigdl.llm.transformers import AutoModelForCausalLM
from transformers import AutoTokenizer


# Prompt template for Qwen.
# NOTE(review): currently unused by the chat flow below — model.chat() is
# called with the raw user text; kept for compatibility.
QWEN_PROMPT_FORMAT = "{prompt} "


# Directory holding the int4-quantized Qwen-1.8B checkpoint
# (presumably saved earlier via save_low_bit() — confirm against the export script).
load_path = "qwen18chat_int4"
# NOTE(review): trust_remote_code executes code shipped with the checkpoint;
# acceptable only when the model source is trusted.
tokenizer = AutoTokenizer.from_pretrained(load_path, trust_remote_code=True)
model = AutoModelForCausalLM.load_low_bit(load_path, trust_remote_code=True)


def add_text(history, text):
  """Run the model on the user's message and return the updated history.

  Despite the name (kept from the gradio chatbot tutorial), this performs the
  full chat completion: model.chat() is expected to append the (text, reply)
  exchange to the history it returns — confirm against the Qwen chat API.
  Also returns a cleared, disabled textbox so the user cannot type while
  bot() streams the reply back into the chat window.
  """
  _reply, updated_history = model.chat(tokenizer, text, history=history)
  cleared_box = gr.Textbox(value="", interactive=False)
  return updated_history, cleared_box


def bot(history):
  """Stream the latest assistant reply one character at a time.

  The full reply is already stored in history[-1][1]; this generator clears
  it and rebuilds it character by character, yielding the mutated history
  after each step so the chat UI shows a typing effect (50 ms per char).
  Yields nothing when the stored reply is empty.
  """
  full_reply = history[-1][1]
  history[-1][1] = ""
  for ch in full_reply:
    history[-1][1] = history[-1][1] + ch
    time.sleep(0.05)
    yield history


# UI layout and event wiring for the chat demo.
with gr.Blocks() as demo:
  # Conversation display; starts with an empty message list.
  chatbot = gr.Chatbot(
    [], 
    elem_id="chatbot",
    bubble_full_width=False,
  )


  with gr.Row():
    # Bare single-line input field (no label, no container styling).
    txt = gr.Textbox(
      scale=4,
      show_label=False,
      placeholder="Enter text and press enter",
      container=False,
    )


  # On Enter: add_text() runs the model and clears/disables the textbox
  # (queue=False so the textbox update is applied without waiting in the
  # queue), then bot() streams the reply into the chatbot display.
  txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
    bot, chatbot, chatbot, api_name="bot_response"
  )
  # After streaming finishes, re-enable the (already cleared) textbox.
  txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)


# queue() is required for generator callbacks like bot() to stream updates.
demo.queue()
demo.launch()