#
import argparse
import os
from pathlib import Path
from threading import Thread
from typing import Union, Dict

import gradio as gr
import torch
from peft import AutoPeftModelForCausalLM, PeftModelForCausalLM
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    PreTrainedModel,
    PreTrainedTokenizer,
    PreTrainedTokenizerFast,
    StoppingCriteria,
    StoppingCriteriaList,
    TextIteratorStreamer
)

class StopOnTokens(StoppingCriteria):
    """Stop generation as soon as the most recent token is an EOS token.

    Reads the EOS id(s) from the shared ``IraLlmWeb.model`` config, so it
    must only be used after ``IraLlmWeb.startup`` has loaded the model.
    """

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = IraLlmWeb.model.config.eos_token_id
        # BUG FIX: eos_token_id may be a single int or a list of ints
        # depending on the model config; the old code iterated over it
        # unconditionally and crashed on the int case.  Normalize first.
        if not isinstance(stop_ids, (list, tuple)):
            stop_ids = [stop_ids]
        last_token = input_ids[0][-1]
        return any(last_token == stop_id for stop_id in stop_ids)

class IraLlmWeb(object):
    """Gradio web front-end for a GLM-4 chat model with streamed replies.

    The model and tokenizer are loaded once in ``startup`` and stored as
    class attributes so the Gradio callbacks (and ``StopOnTokens``) can
    reach them without plumbing them through every handler.
    """

    ModelType = Union[PreTrainedModel, PeftModelForCausalLM]
    TokenizerType = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
    # Populated by startup(); None until then.
    model = None
    tokenizer = None

    def __init__(self):
        # Dotted identifier of this component within the application.
        self.name = 'apps.ira.core.ira_llm_web.IraLlmWeb'

    @staticmethod
    def startup(params: Union[Dict, None] = None) -> None:
        """Load the model, build the chat UI and launch the Gradio server.

        ``params`` may override ``hf_home``, ``model_path``, ``server_name``
        and ``server_port``; the defaults reproduce the previous hard-coded
        values, so existing callers are unaffected.  (The old signature used
        a mutable ``{}`` default — replaced with ``None`` to avoid the
        shared-mutable-default pitfall.)
        """
        params = params or {}
        # Directory Hugging Face uses to cache downloaded models.
        os.environ['HF_HOME'] = params.get(
            'hf_home', '/home/psdz/diskc/yantao/adev/GLM-4/work/models/hf')
        MODEL_PATH = params.get('model_path', 'THUDM/glm-4-9b-chat')
        # NOTE(review): TOKENIZER_PATH is read but never used below — kept
        # to preserve the env lookup in case external code relies on it.
        TOKENIZER_PATH = os.environ.get("TOKENIZER_PATH", MODEL_PATH)
        IraLlmWeb.model, IraLlmWeb.tokenizer = IraLlmWeb.load_model_and_tokenizer(
            MODEL_PATH, trust_remote_code=True)

        with gr.Blocks() as demo:
            gr.HTML("""<h1 align="center">智能化雷达代码大模型</h1>""")
            chatbot = gr.Chatbot(type='messages')

            with gr.Row():
                with gr.Column(scale=3):
                    with gr.Column(scale=12):
                        user_input = gr.Textbox(show_label=False, placeholder="请输入......", lines=10, container=False)
                    with gr.Column(min_width=32, scale=1):
                        submitBtn = gr.Button("提交")
                with gr.Column(scale=1):
                    prompt_input = gr.Textbox(show_label=False, placeholder="提示", lines=10, container=False)
                    pBtn = gr.Button("设置提示词")
                with gr.Column(scale=1):
                    emptyBtn = gr.Button("重置对话")
                    max_length = gr.Slider(0, 32768, value=8192, step=1.0, label="最大长度", interactive=True)
                    top_p = gr.Slider(0, 1, value=0.8, step=0.01, label="Top P", interactive=True)
                    temperature = gr.Slider(0.01, 1, value=0.6, step=0.01, label="温度", interactive=True)

            def user(query, history):
                # Append the new user turn in the openai-style dict format
                # required by gr.Chatbot(type='messages'); clear the textbox.
                return '', history + [{"role": "user", "content": query}]

            def set_prompt(prompt_text):
                # BUG FIX: a messages-format chatbot expects a *list* of
                # role/content dicts; the previous code returned a
                # (dict, str) tuple, which Gradio cannot render.  Show the
                # prompt as a user entry followed by a confirmation reply;
                # predict() filters these display-only entries back out.
                return [
                    {"role": "user", "content": prompt_text},
                    {"role": "assistant", "content": "成功设置prompt"},
                ]

            pBtn.click(set_prompt, inputs=[prompt_input], outputs=chatbot)

            submitBtn.click(user, [user_input, chatbot], [user_input, chatbot], queue=False).then(
                IraLlmWeb.predict, [chatbot, prompt_input, max_length, top_p, temperature], chatbot
            )
            emptyBtn.click(lambda: (None, None), None, [chatbot, prompt_input], queue=False)

        demo.queue()
        demo.launch(
            server_name=params.get('server_name', "192.168.1.115"),
            server_port=params.get('server_port', 8008),
            inbrowser=False,
            share=False,
        )

    @staticmethod
    def _resolve_path(path: Union[str, Path]) -> Path:
        """Expand ``~`` and resolve *path* to an absolute ``Path``."""
        return Path(path).expanduser().resolve()

    @staticmethod
    def load_model_and_tokenizer(
        model_dir: Union[str, Path], trust_remote_code: bool = True
    ) -> tuple[ModelType, TokenizerType]:
        """Load the causal LM (device-mapped automatically) and its slow
        tokenizer from *model_dir* (a local path or a hub model id)."""
        model = AutoModelForCausalLM.from_pretrained(
            model_dir, trust_remote_code=trust_remote_code, device_map='auto'
        )
        tokenizer = AutoTokenizer.from_pretrained(
            model_dir, trust_remote_code=trust_remote_code, use_fast=False
        )
        return model, tokenizer

    @staticmethod
    def predict(history, prompt, max_length, top_p, temperature):
        """Stream an assistant reply for the current chat history.

        history:  messages-format history from the gr.Chatbot
                  (list of {'role', 'content'} dicts).
        prompt:   optional system-prompt text; injected as a system message.
        Yields the history with the assistant entry growing token by token.
        """
        stop = StopOnTokens()
        messages = []
        if prompt:
            messages.append({"role": "system", "content": prompt})
        for idx, item in enumerate(history):
            # Skip the display-only entries that set_prompt() placed in the
            # chatbot (the prompt text and its confirmation); the prompt is
            # already injected above as a proper system message.  Matching
            # on content makes this a no-op when the button was never used.
            if prompt and idx < 2 and item['content'] in (prompt, '成功设置prompt'):
                continue
            if item['role'] == 'user':
                user_msg, model_msg = item['content'], ''
            else:
                user_msg, model_msg = '', item['content']
            # The final entry is the pending user question: send it and stop.
            if idx == len(history) - 1 and not model_msg:
                messages.append({"role": "user", "content": user_msg})
                break
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if model_msg:
                messages.append({"role": "assistant", "content": model_msg})

        device = next(IraLlmWeb.model.parameters()).device
        model_inputs = IraLlmWeb.tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_tensors="pt",
        ).to(device)
        streamer = TextIteratorStreamer(
            IraLlmWeb.tokenizer, timeout=60, skip_prompt=True, skip_special_tokens=True
        )
        # BUG FIX: the prompt is a single unpadded sequence, so every
        # position is a real token and must be attended to.  The previous
        # mask `(inputs != 0)` wrongly hid any token whose id happens to
        # be 0.
        attention_mask = torch.ones_like(model_inputs, dtype=torch.int32)
        generate_kwargs = {
            "input_ids": model_inputs,
            "attention_mask": attention_mask,
            "streamer": streamer,
            "max_new_tokens": max_length,
            "do_sample": True,
            "top_p": top_p,
            "temperature": temperature,
            "stopping_criteria": StoppingCriteriaList([stop]),
            "repetition_penalty": 1.2,
            "eos_token_id": IraLlmWeb.model.config.eos_token_id,
        }
        # Run generation on a worker thread so this generator can stream
        # tokens to the UI as they arrive.
        generation_thread = Thread(target=IraLlmWeb.model.generate, kwargs=generate_kwargs)
        generation_thread.start()
        history.append({'role': 'assistant', 'content': ''})
        for new_token in streamer:
            if new_token:
                history[-1]['content'] += new_token
            yield history

def main(args: Union[argparse.Namespace, None] = None) -> None:
    """Print the startup banner and launch the web application.

    *args* is currently unused but kept in the signature for existing
    callers.  The old default was a mutable ``{}`` (and not a Namespace at
    all); ``None`` avoids the shared-mutable-default pitfall.
    """
    # Plain string: the old f-string had no placeholders.
    print('智能雷达智能体 IraWeb v0.0.2')
    params = {}
    IraLlmWeb.startup(params=params)

def parse_args() -> argparse.Namespace:
    """Parse the command-line options for the web application."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--run_mode',
        dest='run_mode',
        action='store',
        type=int,
        default=1,
        help='run mode',
    )
    return parser.parse_args()

if __name__ == '__main__':
    # Parse CLI options and hand off to the application entry point.
    cli_args = parse_args()
    main(args=cli_args)