import gradio as gr
import os
import torch
from transformers import GemmaTokenizer
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread

MAX_CONVERSATION_LENGTH = 20  # Maximum number of chat messages retained per conversation.


# Markdown shown above the chat interface (currently empty).
DESCRIPTION = '''
'''

# Markdown shown below the chat interface (currently empty).
LICENSE = """
"""

# Placeholder text for the empty chat box (user-facing, intentionally in Chinese).
PLACEHOLDER = """
请遵守相关法律，勿发布不适当内容...
"""


# Custom CSS: center the page title and style the duplicate button.
css = """
h1 {
  text-align: center;
  display: block;
}
#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
"""

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("hf-models/Qwen1.5-7B-Chat")
# https://pytorch.org/docs/stable/tensor_attributes.html
# torch_dtype="auto" would select BFloat16, which is unsupported on Iluvatar (天数智芯)
# GPUs: RuntimeError: at::cuda::blas::gemm: not implemented for N3c108BFloat16E
# — so float16 is forced instead. device_map="auto" places the model on available devices.
model = AutoModelForCausalLM.from_pretrained("hf-models/Qwen1.5-7B-Chat", torch_dtype=torch.float16, device_map="auto")


# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def chat_Qwen(message: str, 
              history: list, 
              temperature: float, 
              max_new_tokens: int
             ):
    """
    Stream a chat response from the Qwen1.5-7B-Chat model.

    Args:
        message (str): The latest user input.
        history (list): (user, assistant) message pairs maintained by
            gr.ChatInterface.
        temperature (float): Sampling temperature; 0 falls back to greedy
            decoding (do_sample=False).
        max_new_tokens (int): The maximum number of new tokens to generate.

    Yields:
        str: The accumulated response text so far (grows as tokens stream in).
    """
    print(message)
    # Keep the system prompt separate so history truncation never drops it.
    system_msg = {"role": "system", "content": "你是一个得力的助手，你遵守法律、道德。"}
    turns = []
    for user, assistant in history:
        turns.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    turns.append({"role": "user", "content": message})
    # Trim the OLDEST turns when the conversation is too long (the previous
    # code sliced the whole list and could discard the system prompt and
    # start the history on an assistant turn).
    if len(turns) > MAX_CONVERSATION_LENGTH - 1:
        turns = turns[-(MAX_CONVERSATION_LENGTH - 1):]
        if turns and turns[0]["role"] == "assistant":
            # Keep user/assistant roles strictly alternating, starting with user.
            turns = turns[1:]
    conversation = [system_msg] + turns
    # add_generation_prompt=True appends the assistant prefix expected by the
    # Qwen chat template so the model answers instead of continuing the prompt.
    # model.device respects device_map="auto" (the old hard-coded "cuda" did not).
    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    # Streaming generation: https://qwen.readthedocs.io/zh-cn/latest/inference/chat.html
    streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        # Bug fix: `temperature` was accepted but never forwarded, so the UI
        # slider had no effect. Sampling is disabled for temperature == 0.
        do_sample=temperature > 0,
        temperature=temperature,
    )
    # generate() blocks until completion, so run it in a worker thread and
    # consume the streamer incrementally on this (generator) side.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
    outputs = []
    for text in streamer:
        print(text)
        outputs.append(text)
        yield "".join(outputs)
    # Release cached activations between requests to limit GPU memory growth.
    torch.cuda.empty_cache()
        

# Gradio block
# Shared chat display widget reused by the ChatInterface below.
chatbot=gr.Chatbot(height=500, label='Gitee AI', layout="bubble")

with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    
    gr.Markdown(DESCRIPTION)
    # ChatInterface wires chat_Qwen to the UI; the two sliders map positionally
    # to chat_Qwen's (temperature, max_new_tokens) parameters.
    gr.ChatInterface(
        fn=chat_Qwen,
        chatbot=chatbot,
        submit_btn="提交",
        retry_btn="重试",
        undo_btn="撤销",
        clear_btn="清除",
        stop_btn="停止",
        theme="soft",
        additional_inputs_accordion=gr.Accordion(label="⚙️ 高级设置", open=False, render=False),
        additional_inputs=[
            # Sampling temperature forwarded to chat_Qwen (label: "diversity").
            gr.Slider(minimum=0,
                      maximum=1, 
                      step=0.1,
                      value=0.95, 
                      label="多样性", 
                      render=False),
            # max_new_tokens forwarded to chat_Qwen (label: "max text length").
            gr.Slider(minimum=128, 
                      maximum=3000,
                      step=1,
                      value=512, 
                      label="最大文本长度", 
                      render=False ),
            ],
        # Clickable example prompts shown under the chat box.
        examples=[
                ['我的蓝牙耳机坏了，我该去看牙科还是耳鼻喉科？'],
                ['中国历史带给你什么启发？'],
                ['每天吃一粒感冒药，还会感冒吗?'],
                ['列一个火星殖民计划'],
                ['人生的意义是什么？'],
                ['大胆预测，一百年后会发生什么？'],
                ['鲁迅和周树人什么关系'],
                ['写一个 js 代码，实现排序算法'],
            ],
        # Examples hit the live model; caching them at startup would be slow.
        cache_examples=False,
                     )
    
    gr.Markdown(LICENSE)
    
if __name__ == "__main__":
    # Queue with concurrency 2 so streaming responses are serialized fairly.
    demo.queue(2).launch()