import os
import threading
from pathlib import Path
from typing import Union

import gradio as gr
import torch
from peft import AutoPeftModelForCausalLM
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    PreTrainedModel,
    PreTrainedTokenizer,
    StoppingCriteria,
    StoppingCriteriaList,
    TextIteratorStreamer
)

# Type aliases for the model and tokenizer objects returned by the loader.
ModelType = PreTrainedModel
TokenizerType = PreTrainedTokenizer

# Checkpoint locations, overridable via environment variables.
MODEL_PATH = os.environ.get('MODEL_PATH', './model/ZhipuAI/glm-4-9b-chat')
TOKENIZER_PATH = os.environ.get('TOKENIZER_PATH', MODEL_PATH)

# Default system prompt ("You are an accurate and concise assistant named avery").
DEFAULT_SYSTEM_PROMPT = "您是一个提供准确且简洁回答的智能助手，你叫avery。"

def _resolve_path(path: Union[str, Path]):
    """
    将给定的路径转换为绝对路径，并解析为标准路径格式。
    """
    return Path(path).expanduser().resolve()

def load_model_and_tokenizer(model_dir: Union[str, Path], trust_remote_code: bool = True) -> tuple[ModelType, TokenizerType]:
    """
    Load a causal-LM model and its tokenizer from *model_dir*.

    A directory containing ``adapter_config.json`` is treated as a PEFT
    (adapter/LoRA) checkpoint; in that case the tokenizer is loaded from the
    adapter's base model rather than the adapter directory itself.

    Args:
        model_dir: Local checkpoint path (may contain '~').
        trust_remote_code: Forwarded to every ``from_pretrained`` call so
            custom modeling/tokenization code in the checkpoint can run.

    Returns:
        A ``(model, tokenizer)`` tuple.

    Raises:
        ValueError: if the tokenizer comes back as ``None``.
    """
    model_dir = _resolve_path(model_dir)
    print(f"加载模型从: {model_dir}")
    if (model_dir / 'adapter_config.json').exists():
        print("检测到 PEFT 模型")
        model = AutoPeftModelForCausalLM.from_pretrained(
            model_dir, trust_remote_code=trust_remote_code, device_map='auto'
        )
        # The tokenizer lives with the adapter's base model, not the adapter dir.
        tokenizer_dir = model.peft_config['default'].base_model_name_or_path
    else:
        print("加载普通模型")
        # Fix: the original hard-coded trust_remote_code=True here, silently
        # ignoring the caller-supplied argument.
        model = AutoModelForCausalLM.from_pretrained(
            model_dir, trust_remote_code=trust_remote_code, device_map='auto'
        )
        tokenizer_dir = model_dir

    print(f"加载分词器从: {tokenizer_dir}")
    # Same fix as above: honor the trust_remote_code parameter.
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_dir, trust_remote_code=trust_remote_code, use_fast=False
    )

    if tokenizer is None:
        raise ValueError("分词器加载失败，tokenizer 为 None")

    print("分词器加载成功")
    return model, tokenizer

# Load the model and tokenizer once at import time; shared by all requests.
model, tokenizer = load_model_and_tokenizer(MODEL_PATH)

class StopOnTokens(StoppingCriteria):
    """Stop generation as soon as the last generated token is an EOS token
    declared in the global model's config."""

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = model.config.eos_token_id
        # Fix: eos_token_id may be a single int (common in HF configs), in
        # which case iterating over it would raise TypeError. Normalize first.
        if isinstance(stop_ids, int):
            stop_ids = [stop_ids]
        last_token = int(input_ids[0, -1])
        return last_token in stop_ids

def parse_text(text):
    """
    Convert plain chat text to HTML for the Gradio Chatbot widget.

    Fenced code blocks (```lang ... ```) become <pre><code> elements; inside
    a code block, HTML-special and markdown-special characters are escaped to
    entities so the browser renders them literally. Newlines become <br>.

    Fix: the escape table had been destroyed by entity-unescaping during a
    copy/paste, leaving every call a no-op like line.replace("<", "<").
    The real entity escapes are restored below.
    """
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
    count = 0  # number of ``` fences seen; odd => currently inside a code block
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split('`')
            if count % 2 == 1:
                # Opening fence: the trailing fragment is the language tag.
                language = items[-1]
                lines[i] = f'<pre><code class="language-{language}">'
            else:
                lines[i] = '</code></pre>'
        else:
            if i > 0:
                if count % 2 == 1:
                    # Inside a code block: escape for literal rendering.
                    # '&' must be escaped first to avoid re-escaping entities.
                    line = line.replace("&", "&amp;")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("`", "\\`")
                    line = line.replace("*", "&ast;")
                    line = line.replace("_", "&lowbar;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                    line = line.replace("^", "&#94;")
                    line = line.replace("+", "&#43;")
                    line = line.replace(",", "&#44;")
                lines[i] = f"<br>{line}" if line else "<br>"
    text = "".join(lines)
    return text

def predict(history, prompt, max_length, top_p, temperature):
    """
    Stream a model reply for the latest user turn.

    Args:
        history: Gradio chat history — list of [user_msg, assistant_msg]
            pairs; the last pair's assistant slot is filled incrementally.
        prompt: System prompt text; skipped when empty.
        max_length: Passed as max_new_tokens to generation.
        top_p: Nucleus-sampling threshold.
        temperature: Sampling temperature.

    Yields:
        The updated history after each streamed text chunk.
    """
    stop = StopOnTokens()
    messages = []

    # Optional system prompt (default or user-edited).
    if prompt:
        messages.append({"role": "system", "content": prompt})

    # Rebuild the conversation as chat-template messages.
    # (Fix: dropped the unused `idx` from enumerate.)
    for user_msg, model_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if model_msg:
            messages.append({"role": "assistant", "content": model_msg})

    model_inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)

    # The streamer yields decoded text pieces as the worker thread generates.
    streamer = TextIteratorStreamer(
        tokenizer,
        timeout=60,
        skip_prompt=True,
        skip_special_tokens=True
    )

    generate_kwargs = {
        "input_ids": model_inputs,
        "streamer": streamer,
        "max_new_tokens": max_length,
        "top_p": top_p,
        "temperature": temperature,
        "do_sample": True,
        "eos_token_id": tokenizer.eos_token_id,
        "stopping_criteria": StoppingCriteriaList([stop]),
        "repetition_penalty": 1.2
    }

    # Run generation in a background thread so we can consume the streamer here.
    thread = threading.Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()

    history[-1][1] = ""
    try:
        for new_token in streamer:
            if new_token:
                history[-1][1] += new_token
            yield history
    finally:
        # Make sure the generation thread finishes even if the consumer bails.
        thread.join()

# Build the Gradio UI; everything inside this block runs at import time.
with gr.Blocks(title='我的AI聊天机器人') as demo:
    gr.HTML("<h1 align='center'>聊天机器人</h1>")
    chatbot = gr.Chatbot(label='AI', elem_id='chatbot', height=550)
    with gr.Row():
        # Left column: message input plus send button.
        with gr.Column(scale=3):
            with gr.Column(scale=12):
                user_input = gr.Textbox(show_label=False, placeholder="输入文字并回车", lines=10, container=False)
                submit_btn = gr.Button("发送", variant="primary")
        # Middle column: editable system prompt.
        with gr.Column(scale=1):
            prompt_input = gr.Textbox(
                label="系统提示词",
                placeholder="请输入系统提示词",
                lines=10,
                container=False,
                value=DEFAULT_SYSTEM_PROMPT  # shown by default
            )
            prompt_btn = gr.Button("设置系统提示词", variant="primary")
        # Right column: clear button and generation parameter sliders.
        with gr.Column(scale=1):
            clear_btn = gr.Button("清除记录")
            max_length = gr.Slider(
                minimum=1,
                maximum=8192,
                value=2048,
                step=1.0,
                interactive=True,
                label="最大长度",
            )
            top_p = gr.Slider(
                minimum=0.1,
                maximum=1,
                value=0.95,
                step=0.01,
                interactive=True,
                label="top_p",
            )
            temperature = gr.Slider(
                minimum=0,
                maximum=1,
                value=0.7,
                step=0.01,
                interactive=True,
                label="温度",
            )

    def set_prompt(prompt_input):
        """Replace the chat window with a single confirmation message."""
        confirmation_pair = [parse_text(prompt_input), "成功设置提示词"]
        return [confirmation_pair]

    def user_question(user_input, history):
        """Append the user's message to the history and clear the textbox.

        Whitespace-only input leaves the history untouched.
        """
        if not user_input.strip():
            return "", history
        updated = history + [[parse_text(user_input), ""]]
        return "", updated

    # Setting the system prompt replaces the chat with a confirmation message.
    prompt_btn.click(set_prompt, inputs=[prompt_input], outputs=chatbot)

    # Send: move the textbox content into the history, then stream the reply.
    submit_btn.click(user_question, inputs=[user_input, chatbot], outputs=[user_input, chatbot], queue=False).then(
        predict,
        [chatbot, prompt_input, max_length, top_p, temperature],
        chatbot,
        show_progress=True,
    )

    # Clear the chat and restore the default system prompt.
    clear_btn.click(lambda: (None, DEFAULT_SYSTEM_PROMPT), inputs=None, outputs=[chatbot, prompt_input], queue=False)

if __name__ == '__main__':
    # Enable request queuing (required for streaming output) and start the server.
    demo.queue()
    demo.launch(server_name='0.0.0.0', server_port=3353, inbrowser=True, share=False)