
import os
from typing import Literal
print("导入 torch")
import torch
print("导入 predictor")
from predictor import LanguageModelPredictor, GenerateTextConfig
print("导入 gradio")
import gradio as gr
from threading import Timer

print("加载程序")

# Base directories scanned for model subdirectories.
model_dir = [
    "./model",
    "./models"
]
# Names of every subdirectory found under the candidate base directories.
# (Renamed loop variable: the original shadowed the builtin `dir`.)
models = []
for base_dir in model_dir:
    if not os.path.exists(base_dir):
        continue
    models += [
        entry for entry in os.listdir(base_dir)
        if os.path.isdir(os.path.join(base_dir, entry))
    ]
print("models: " + str(models))

# Default to the first discovered model, if any.
selected_model_name = models[0] if models else ""
load_status = "未加载任何模型"
# Fallback torch dtype used when an invalid dtype name is requested.
DEFAULT_DTYPE = torch.bfloat16
# Default generation parameters mirrored by the UI sliders below.
DEFAULT_TOP_K = 80
DEFAULT_TOP_P = 0.9
DEFAULT_TEMPERATURE = 0.9
DEFAULT_MAX_NEW_TOKENS = 512
# Currently loaded predictor; None until load_model() succeeds.
predictor = None

if torch.cuda.is_available():
    # Allow TF32 matmuls for faster inference on supporting GPUs.
    # NOTE(review): the original also called torch.device("cuda") here and
    # discarded the result — a no-op, removed.
    torch.backends.cuda.matmul.allow_tf32 = True

def get_model_path(model_name: str, search_dirs=None):
    """Locate a model directory on disk.

    Args:
        model_name: Name of the model subdirectory to look for.
        search_dirs: Optional list of base directories to search; defaults
            to the module-level ``model_dir`` list.

    Returns:
        The first existing path (base_dir/model_name), or None when the
        model is not found in any base directory.
    """
    if search_dirs is None:
        search_dirs = model_dir
    for base_dir in search_dirs:
        candidate = os.path.join(base_dir, model_name)
        if os.path.exists(candidate):
            return candidate
    return None

def load_model(model_name: str, dtype_str="bfloat16", use_airllm=False):
    """Load (or reload) the selected model into the global predictor.

    Args:
        model_name: Name of a model subdirectory discovered at startup.
        dtype_str: Name of a torch dtype attribute (e.g. "bfloat16");
            falls back to DEFAULT_DTYPE when it is not a valid dtype name.
        use_airllm: Whether to load the model through AirLLM.

    Returns:
        The human-readable load-status string shown in the UI.

    Raises:
        gr.Error: When model loading fails for any reason.
    """
    global predictor, load_status

    try:
        # Fall back to DEFAULT_DTYPE for unknown dtype names. The original
        # `getattr(torch, dtype_str) or DEFAULT_DTYPE` raised AttributeError
        # on a bad name, so its fallback branch was unreachable.
        torch_dtype = getattr(torch, dtype_str, DEFAULT_DTYPE)
        if not isinstance(torch_dtype, torch.dtype):
            torch_dtype = DEFAULT_DTYPE
        # Release the previously loaded model before loading a new one.
        if predictor is not None:
            predictor.release()
        model_path = get_model_path(model_name)
        predictor = LanguageModelPredictor(
            model_path,
            torch_dtype,
            use_airllm,
        )
    except Exception as e:
        gr.Warning("模型加载失败")
        # Chain the cause so the original traceback is preserved.
        raise gr.Error(str(e)) from e

    load_status = f"已加载模型: {model_name}（{torch_dtype}）"
    return load_status

@torch.inference_mode()
def chat(
        input_text,
        history: "list[dict] | None" = None,
        system_prompt: str = "",
        function: Literal["generate", "chat"] = "generate",
        streaming = False,
        top_k: int = DEFAULT_TOP_K,
        top_p: float = DEFAULT_TOP_P,
        temperature: float = DEFAULT_TEMPERATURE,
        max_new_token: int = DEFAULT_MAX_NEW_TOKENS
    ):
    """Generator backing the Gradio chat UI.

    Appends the optional system prompt and the user turn to *history*,
    runs generation through the global predictor, and yields
    gr.ChatMessage("assistant", ...) — incrementally when *streaming* is
    true, otherwise once with the full reply.

    Raises:
        gr.Error: When no model is loaded, when *function* is not
            "generate", or when generation itself fails.
    """
    if predictor is None:
        raise gr.Error("请先加载模型")

    # Pre-processing: build the message list sent to the model.
    print("\nuser: " + input_text)
    if history is None:
        # The default used to be a shared mutable list ([]); a None sentinel
        # prevents messages from accumulating across calls that omit history.
        history = []
    if system_prompt is not None and len(system_prompt) != 0:
        history.append(gr.ChatMessage("system", system_prompt))
    history.append(gr.ChatMessage("user", input_text))

    # Generation. Only "generate" is implemented; "chat" is rejected.
    try:
        assistant_response = ""
        if function == "generate":
            assistant_response = predictor.generate_text(
                history,
                max_new_token,
                GenerateTextConfig(
                    streaming=streaming,
                    temperature=temperature,
                    top_k=top_k,
                    top_p=top_p
                )
            )
        else:
            raise Exception("错误的生成方式")
    except Exception as e:
        # Chain the cause so the original traceback is preserved.
        raise gr.Error(str(e)) from e

    # Emit results: stream chunk-by-chunk, or yield the full reply once.
    if streaming:
        text = ""
        for chunk in assistant_response:
            # Strip the end-of-sequence token from streamed chunks.
            if predictor.eos_token is not None:
                chunk = chunk.replace(predictor.eos_token, "")
            text += chunk
            yield gr.ChatMessage("assistant", text)
    else:
        print("assistant: " + assistant_response)
        yield gr.ChatMessage("assistant", assistant_response)

# Custom CSS for the Blocks layout: styles the .title and .description
# divs rendered by the Markdown headers below.
css = """
    .title {
        font-size: 24px;
        font-weight: bold;
        color: #333;
        text-align: center;
        margin-bottom: 20px;
    }
    .description {
        font-size: 16px;
        color: #666;
        text-align: center;
        margin-bottom: 20px;
    }
"""
# UI construction: left column holds model loading + chat, right column
# holds actions, system prompt, and sampling options.
with gr.Blocks(css=css) as demo:
    # Page header (styled by the custom CSS above).
    gr.Markdown("<div class='title'>语言大模型对话</div>")
    gr.Markdown("<div class='description'>这是一个基于Gradio的语言大模型对话界面。</div>")

    with gr.Row():
        with gr.Column(scale=3):
            with gr.Column():
                gr.Markdown("Model")
                with gr.Row():
                    # dtype names here must match torch attribute names,
                    # since load_model resolves them via getattr(torch, ...).
                    dtype_selector = gr.Dropdown(
                        [
                            "bfloat16",
                            "float16",
                            "float32",
                            "float64",
                            "uint8",
                            'int8',
                            "int16",
                            'int32',
                            "int64"
                        ],
                        label="选择数据类型",
                        value="bfloat16",
                        interactive=True
                    )
                    # Models discovered at startup; preselect the first one.
                    model_selector = gr.Dropdown(models, label="选择模型", value=selected_model_name)
                    airllm_radio = gr.Radio([True, False], label="使用AirLLM", value=False)

                status_text = gr.Textbox(value=load_status, label="加载状态", lines=1, interactive=False)
                load_button = gr.Button("加载模型")

            gr.Markdown("Chat")
            chatbot = gr.Chatbot(type="messages")
            with gr.Row():
                input_text = gr.Textbox(label="输入", placeholder="请输入您的问题...")

        with gr.Column():
            gr.Markdown("Actions")
            clear_button = gr.ClearButton(chatbot, "清空历史")

            gr.Markdown("System Prompt")
            system_prompt = gr.Textbox(label="System Prompt", lines=2, placeholder="请输入提示词")

            gr.Markdown("Options")
            with gr.Row():
                # These feed the `function` / `streaming` parameters of chat().
                function_radio = gr.Radio(["generate", "chat"], label="选择生成方式", value="generate")
                stream_radio = gr.Radio([True, False], label="流式输出", value=True)

            # Sampling sliders, defaulting to the module-level constants.
            top_k_slider = gr.Slider(
                minimum=0, maximum=100, value=DEFAULT_TOP_K, step=1,
                label="Top-k"
            )
            top_p_slider = gr.Slider(
                minimum=0.1, maximum=1.0, value=DEFAULT_TOP_P, step=0.05,
                label="Top-p (nucleus sampling)"
            )
            temperature_slider = gr.Slider(
                minimum=0.1, maximum=1.5, value=DEFAULT_TEMPERATURE, step=0.05,
                label="Temperature"
            )
            max_new_tokens_slider = gr.Slider(
                minimum=50, maximum=2048, value=DEFAULT_MAX_NEW_TOKENS, step=2,
                label="Max New Tokens"
            )

    # NOTE(review): setattr on a Gradio component object mutates only the
    # Python-side attribute, not the live UI/session state, and this handler
    # declares no outputs — it appears to be a no-op; confirm and consider
    # removing it.
    dtype_selector.change(
        lambda nv: setattr(dtype_selector, "value", nv),
        inputs=dtype_selector
    )
    # Show an interim "loading" status, then run load_model and display its
    # returned status string.
    load_button.click(
        fn=lambda: "正在加载模型...",
        inputs=None,
        outputs=status_text
    ).then(
        fn=load_model,
        inputs=[
            model_selector,
            dtype_selector,
            airllm_radio
        ],
        outputs=status_text
    )
    # Wire the chat() generator into the chatbot; the additional inputs map
    # positionally onto chat()'s parameters after (input_text, history).
    gr.ChatInterface(
        chat,
        type="messages",
        chatbot=chatbot,
        textbox=input_text,
        additional_inputs=[
            system_prompt,
            function_radio,
            stream_radio,
            top_k_slider,
            top_p_slider,
            temperature_slider,
            max_new_tokens_slider
        ]
    )



demo.launch()