from typing import Optional, Any

import torch
from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM, BitsAndBytesConfig, TextIteratorStreamer
from langchain.llms.base import LLM
from langchain_core.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnablePassthrough
import langchain_core.messages
from threading import Thread
import gradio as gr

# Free any cached GPU memory left over from a previous run before loading the model.
torch.cuda.empty_cache()
# Pinned dependency versions this script was developed against:
# "transformers==4.51.0",
# "accelerate==0.26.0",
# Model weights: https://hf-mirror.com/Qwen/Qwen3-14B/tree/main
path = r'M:\moudels\Qwen314B\Qwen3-14B'  # local directory holding the Qwen3-14B checkpoint


# 4-bit (NF4) quantization config so the 14B model fits in limited GPU memory.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # run matmuls in fp16
    bnb_4bit_quant_type="nf4",             # NormalFloat4 quantization
    bnb_4bit_use_double_quant=True         # also quantize the quantization constants
)


class QW3(LLM):
    """LangChain ``LLM`` wrapper around a locally hosted, 4-bit quantized Qwen3-14B.

    Generation hyper-parameters are declared as class attributes (pydantic
    fields) because the LangChain ``LLM`` base class is a pydantic model and
    rejects plain instance attributes set in ``__init__``.
    """

    # Generation hyper-parameters (pydantic fields).
    max_length: int = 512       # used as max_new_tokens in generate()
    do_sample: bool = True
    temperature: float = 0.7
    top_p: float = 0.5
    history: list = []          # conversation history; not consumed by the methods below

    # Heavy one-time loads. NOTE: these execute at class-definition (import) time.
    tokenizer: AutoTokenizer = AutoTokenizer.from_pretrained(path,
                                                             trust_remote_code=True,
                                                             )
    model: AutoModelForCausalLM = AutoModelForCausalLM.from_pretrained(path,
                                                                       device_map="auto",
                                                                       quantization_config=quantization_config,
                                                                       trust_remote_code=True
                                                                       ).eval()

    def __init__(self):
        super().__init__()

    @property
    def _llm_type(self):
        """Identifier LangChain uses for this LLM implementation."""
        return "qw3"

    # ---- internal helpers --------------------------------------------------

    def _encode(self, messages):
        """Apply the chat template to *messages* and tokenize onto the model's device."""
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        return self.tokenizer([text], return_tensors="pt").to(self.model.device)

    def _generation_kwargs(self):
        """Sampling parameters shared by the blocking and streaming paths."""
        return dict(
            max_new_tokens=self.max_length,
            temperature=self.temperature,
            top_p=self.top_p,
            repetition_penalty=1.1,
            do_sample=self.do_sample
        )

    def _stream(self, messages):
        """Yield decoded text chunks for *messages* as the model produces them."""
        model_inputs = self._encode(messages)
        streamer = TextIteratorStreamer(
            self.tokenizer,
            skip_prompt=True,           # don't echo the prompt back
            skip_special_tokens=True
        )
        generation_kwargs = dict(**model_inputs, streamer=streamer, **self._generation_kwargs())
        # generate() blocks until completion, so it runs in a worker thread
        # while this generator consumes tokens from the streamer.
        Thread(target=self.model.generate, kwargs=generation_kwargs).start()
        yield from streamer

    # ---- public API --------------------------------------------------------

    def _call(self, prompt, stop=None) -> str:
        """Non-streaming chat: return the complete reply at once.

        LangChain renders the prompt as plain text; line 0 is assumed to be
        ``System: ...`` and line 1 ``Human: ...``. NOTE(review): this parsing
        breaks if the message content itself contains newlines or colons —
        kept as-is for compatibility with the existing prompt templates.
        """
        lines = prompt.split("\n")
        system_str = lines[0].split(":")[-1]
        user_str = lines[1].split(":")[-1]

        messages = [
            {"role": "system", "content": system_str},
            {"role": "user", "content": user_str}
        ]
        model_inputs = self._encode(messages)
        generated_ids = self.model.generate(**model_inputs, **self._generation_kwargs())
        # Slice off the prompt tokens; decode only the newly generated tail.
        response = self.tokenizer.batch_decode(
            generated_ids[:, model_inputs.input_ids.shape[1]:],
            skip_special_tokens=True
        )[0]
        return response.strip()

    def stream_chat(self, messages):
        """Streaming chat: yield the reply text chunk by chunk.

        *messages* is a list of ``{"role": ..., "content": ...}`` dicts,
        e.g. as produced by :func:`get_messages`.
        """
        yield from self._stream(messages)

    def stream_chat_ui(self, chatStr, his):
        """Gradio ``ChatInterface`` callback: yield the *cumulative* reply so the
        chat box grows as tokens arrive. *his* (the UI chat history) is
        currently ignored — each turn starts a fresh conversation.
        """
        messages = [
            {"role": "system", "content": ""},
            {"role": "user", "content": chatStr}
        ]
        partial = ""
        for chunk in self._stream(messages):
            partial += chunk
            yield partial


def get_messages(messages):
    """Convert LangChain message objects into the plain dict format
    (``{"role": ..., "content": ...}``) expected by ``apply_chat_template``.

    Dispatches on the message's ``type`` attribute ("system" / "human" / "ai"),
    which every ``langchain_core`` message class defines, instead of isinstance
    checks — this also accepts any duck-typed object exposing ``type`` and
    ``content``, and additionally maps AI messages to the "assistant" role.
    Messages with an unrecognized type are skipped.
    """
    role_map = {"system": "system", "human": "user", "ai": "assistant"}
    result = []
    for item in messages:
        role = role_map.get(getattr(item, "type", None))
        if role is not None:
            result.append({"role": role, "content": item.content})
    return result


# Custom CSS for the (optional) Gradio chat UI: CJK-friendly font stack with a
# large base size, and a smaller size for the model's <think> reasoning blocks.
CSS = """
*{
    font-family: bold STKaiti, Kaiti SC, Kaiti, BlinkMacSystemFont, Helvetica Neue, PingFang SC, Microsoft YaHei, Source Han Sans SC, Noto Sans CJK SC, WenQuanYi Micro Hei, Arial, sans-serif;
    font-size: 22px;
}
think{
    font-size: 12px;
}
"""

if __name__ == '__main__':
    # Instantiating QW3 triggers the (already started) model/tokenizer load.
    mod = QW3()
    print("加载完成")

    # Demo: drive the model through a LangChain prompt-template pipeline.
    # For a web UI instead, wire mod.stream_chat_ui into gr.ChatInterface
    # (chatbot=gr.Chatbot(sanitize_html=False, type="messages"), css=CSS)
    # and call .launch().
    prompt = ChatPromptTemplate.from_messages([
        ("system", "你是一个面馆点餐机器人"),
        ("user", "{input}")
    ])
    chain = prompt | mod
    chainRes = chain.invoke({"input": "你是谁"})
    print(chainRes)