import re
from typing import Optional, Any, Iterator
from threading import Thread

import gradio as gr

import torch

from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM, BitsAndBytesConfig, TextIteratorStreamer

from langchain.llms.base import LLM

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough, RunnableParallel
from langchain_core.outputs import GenerationChunk

from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import DirectoryLoader

import warnings
from bs4 import BeautifulSoup

# Free any cached GPU memory left over from a previous run before loading models.
torch.cuda.empty_cache()
# Pinned dependency versions this script was developed against:
# "transformers==4.51.0",
# "accelerate==0.26.0",
path = r'M:\moudels\Qwen3_4B'  # local Qwen3 chat-model checkpoint directory
embeddings_path = r'M:\moudels\BAAIbge-large-zh-v1.5'  # local BGE embedding model

# Build the RAG index at import time: embed every document under the data
# directory into an in-memory FAISS vector store. The warnings filter
# deliberately silences the deprecation noise these loaders emit.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    embeddings = HuggingFaceEmbeddings(model_name=embeddings_path)
    load = DirectoryLoader(r'F:\A_wokecode\gradio_study\langchain_study\data')
    vs = FAISS.from_documents(load.load(), embeddings)
    retriever = vs.as_retriever()  # retriever over the indexed documents


# 4-bit quantization config (NF4 with double quantization, fp16 compute)
# so the model fits in limited VRAM.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True
)


def parse_chat_string(chat_str):
    """Split a flattened ``"System: ...\\nHuman: ..."`` prompt into chat messages.

    Args:
        chat_str: the single string LangChain produces from the prompt template.

    Returns:
        A two-element list of ``{"role": ..., "content": ...}`` dicts
        (system message first, then the user message), contents stripped.

    Raises:
        ValueError: if the string does not match the expected layout.
    """
    # re.DOTALL lets the user part span multiple lines; the lazy group keeps
    # the system part bounded by the first "\nHuman:" marker.
    match = re.match(r'^System:\s*(.*?)\nHuman:\s*(.*)$', chat_str, re.DOTALL)
    if match is None:
        raise ValueError("Invalid chat string format")

    system_part, human_part = (part.strip() for part in match.groups())
    return [
        {"role": "system", "content": system_part},
        {"role": "user", "content": human_part},
    ]


class QW3(LLM):
    """LangChain ``LLM`` wrapper around a locally loaded, 4-bit quantized Qwen3 model.

    Supports both one-shot generation (``_call``) and token streaming
    (``_stream`` via ``TextIteratorStreamer``).
    """

    # LangChain's LLM base is a pydantic model, so configuration must be
    # declared as annotated class attributes (fields), not set in __init__.
    max_length: int = 512       # passed to generate() as max_new_tokens
    do_sample: bool = True
    temperature: float = 0.7
    top_p: float = 0.5
    history: list = []          # NOTE(review): declared but never read/written here

    # Tokenizer and model are loaded once, at class-definition time (i.e. on
    # import) — creating the class object itself triggers the download/load.
    tokenizer: AutoTokenizer = AutoTokenizer.from_pretrained(path,
                                                             trust_remote_code=True,
                                                             )
    model: AutoModelForCausalLM = AutoModelForCausalLM.from_pretrained(path,
                                                                       device_map="auto",
                                                                       quantization_config=quantization_config,
                                                                       trust_remote_code=True,
                                                                       # max_memory={0: "12GiB", "cpu": "14GiB"}
                                                                       ).eval()

    def __init__(self):
        super().__init__()

    @property
    def _llm_type(self) -> str:
        # Identifier LangChain uses to tag this LLM implementation.
        return "qw3"

    def _call(self, prompt: str, stop: Optional[list] = None, **kwargs: Any) -> str:
        """Plain chat mode: generate and return the complete reply in one shot.

        Args:
            prompt: flattened chat string ("System: ...\\nHuman: ...") produced
                by the upstream ChatPromptTemplate.
            stop: accepted for LangChain API compatibility but currently ignored.
            **kwargs: ignored.

        Returns:
            The model reply with the prompt tokens stripped and whitespace trimmed.
        """
        # Convert the flattened LangChain prompt into role/content dicts.
        messages = parse_chat_string(prompt)

        # Render the messages with the model's chat template (adds the
        # generation prompt so the model continues as the assistant).
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        # Tokenize and move tensors onto the model's device.
        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.model.device)
        # Generate the reply.
        generated_ids = self.model.generate(
            **model_inputs,
            max_new_tokens=self.max_length,
            temperature=self.temperature,
            top_p=self.top_p,
            repetition_penalty=1.1,
            do_sample=self.do_sample
        )
        # Decode only the newly generated tokens (slice off the prompt).
        response = self.tokenizer.batch_decode(
            generated_ids[:, model_inputs.input_ids.shape[1]:],
            skip_special_tokens=True
        )[0]
        return response.strip()

    def _stream(self, prompt: str, stop: Optional[list] = None, **kwargs: Any) -> Iterator[GenerationChunk]:
        """Streaming chat mode: yield the reply chunk by chunk as it is generated."""
        messages = parse_chat_string(prompt)
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )

        # Tokenize and move tensors onto the model's device.
        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.model.device)

        # Streamer yields decoded text pieces as generation proceeds;
        # skip_prompt drops the input tokens, skip_special_tokens drops
        # template markers from the output.
        streamer = TextIteratorStreamer(
            self.tokenizer,
            skip_prompt=True,
            skip_special_tokens=True
        )

        # generate() blocks until completion, so run it on a background
        # thread and consume the streamer on this one.
        generation_kwargs = dict(
            **model_inputs,
            streamer=streamer,
            max_new_tokens=self.max_length,
            temperature=self.temperature,
            top_p=self.top_p,
            repetition_penalty=1.1,
            do_sample=self.do_sample
        )

        # NOTE(review): the thread is never joined, so exceptions raised
        # inside generate() are silently lost — confirm this is acceptable.
        thread = Thread(target=self.model.generate, kwargs=generation_kwargs)
        thread.start()

        # Relay each decoded piece to LangChain as a GenerationChunk.
        for text_chunk in streamer:
            yield GenerationChunk(text=text_chunk)


# Instantiate the wrapper once at module import. The heavy tokenizer/model
# loading actually happens when the QW3 class body above executes; the
# print ("model loading finished") confirms everything is ready.
mod = QW3()
print("加载模型完成")

# 补充html标签
def html_precess(buffer, response, content, open_tags):
    """Return *response* with closing tags appended for any HTML left open.

    Bug fix: the original also did ``response += content`` here, even though
    the only caller (``gradio_chat``) appends ``content`` to ``response``
    *before* calling — so every yielded string duplicated the newest chunk.
    ``response`` is now expected to already contain ``content``.

    Args:
        buffer: text not yet scanned for tags; the new chunk is appended to it.
            NOTE(review): strings are immutable, so the caller's buffer never
            accumulates across calls — a tag split across two chunks is lost;
            confirm whether that matters for this model's output.
        response: full reply accumulated so far (already includes ``content``).
        content: the newest streamed chunk.
        open_tags: mutable stack of currently-open tag names, updated in place
            so state carries across calls.

    Returns:
        ``response`` plus a ``</tag>`` for each still-open tag, innermost first.
    """
    buffer += content

    # Only scan once the buffer holds at least one complete-looking tag.
    if '<' in buffer and '>' in buffer:
        # Each match is (closing-slash-or-empty, tag-name); attributes are
        # allowed after whitespace and ignored.
        tags = re.findall(r'<(/?)([a-zA-Z1-6]+)(?:\s[^>]*)?>', buffer)

        for closing, name in tags:
            if closing:
                # Closing tag: pop only if it matches the innermost open tag.
                if open_tags and open_tags[-1] == name:
                    open_tags.pop()
            else:
                open_tags.append(name)

    # Append closers innermost-first so the partial HTML renders cleanly.
    complete_response = response
    if open_tags:
        complete_response += ''.join(f'</{tag}>' for tag in reversed(open_tags))
    return complete_response


def gradio_chat(question, his=None):
    """Stream a RAG-augmented answer for *question*, yielding the growing reply.

    Args:
        question: the user's message from the Gradio chat box.
        his: chat history supplied by gr.ChatInterface; currently unused.

    Yields:
        The full reply so far (HTML-safe: open tags are closed per chunk).
    """
    torch.cuda.empty_cache()

    rag_prompt = ChatPromptTemplate.from_messages([
        ("system", """如果问题与下面的内容相关：根据下面的内容回答问题,越靠前的文档参考的价值相对交到。
                      如果问题和内容不相关：不要使用文档内容。
                        {content}"""),
        ("user", "{input}")
    ])
    # Fan out: retrieve documents for {content} while passing the raw
    # question through as {input}.
    gather = RunnableParallel({
        "content": retriever,
        "input": RunnablePassthrough()
    })
    rag_chain = gather | rag_prompt | mod

    full_text = ""
    pending = ""
    unclosed = []
    for chunk in rag_chain.stream(question):
        # Chunks may be messages (.content), generations (.text), or plain
        # strings — take whichever representation is available.
        for attr in ('content', 'text'):
            if hasattr(chunk, attr):
                piece = getattr(chunk, attr)
                break
        else:
            piece = str(chunk)

        # Normalize whitespace for HTML rendering: blank lines become <br/>,
        # remaining newlines become single spaces.
        piece = re.sub(r'\n\s*\n', '<br/>', piece)
        piece = re.sub(r'\n', ' ', piece)
        full_text += piece

        yield html_precess(pending, full_text, piece, unclosed)


# Stylesheet injected into the Gradio page: global font/size, smaller text
# inside <think> blocks, and fixed component/input heights.
CSS = """
*{
    font-family: bold STKaiti, Kaiti SC, Kaiti, BlinkMacSystemFont, Helvetica Neue, PingFang SC, Microsoft YaHei, Source Han Sans SC, Noto Sans CJK SC, WenQuanYi Micro Hei, Arial, sans-serif;
    font-size: 22px;
}
think{
    font-size: 14px;
}
#component-0{
    height: 600px !important; 
}

.input-container svelte-173056l textarea{
        height: 200px;
}
"""

# Chat UI; gradio_chat is a generator, so replies stream into the chatbot.
chat_interface = gr.ChatInterface(
    fn=gradio_chat,
    chatbot=gr.Chatbot(
        sanitize_html=False,  # let the model's HTML (e.g. <br/>, closed tags) render
        type="messages",
        height=500,  # fixed chat panel height
        rtl=False,  # render left-to-right
        # avatar_images=["\U0001F60A", "\U0001F916"]  # simple emoji avatars
    ),
    type="messages",
    title="Qwen3_8B模型",
    description="与Qwen3_8B模型进行实时对话",
    submit_btn="提交",
    stop_btn="撤销",
    css=CSS,
)

if __name__ == '__main__':
    pass
    chat_interface.launch()

