import re
from typing import Optional, Any

import torch
from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM, BitsAndBytesConfig, TextIteratorStreamer
from langchain.llms.base import LLM
from langchain_core.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnablePassthrough
import langchain_core.messages
from threading import Thread
import gradio as gr
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import DirectoryLoader
from langchain_core.runnables import RunnablePassthrough, RunnableParallel
import warnings

# Free any cached GPU memory left over from a previous run before loading models.
torch.cuda.empty_cache()
# Versions this script was developed against:
# "transformers==4.51.0",
# "accelerate==0.26.0",
path = r'M:\moudels\Qwen3_8B'  # local Qwen3-8B checkpoint directory
embeddings_path = r'M:\moudels\BAAIbge-large-zh-v1.5'  # local BAAI bge-large-zh-v1.5 embedding model

# Build the RAG index at import time: load every document under the data
# directory, embed it, and index it with FAISS. Warnings from the HF/FAISS
# stack are suppressed to keep startup output clean.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    embeddings = HuggingFaceEmbeddings(model_name=embeddings_path)
    load = DirectoryLoader(r'F:\A_wokecode\gradio_study\langchain_study\data')
    vs = FAISS.from_documents(load.load(), embeddings)
    retriever = vs.as_retriever()  # retriever used to fetch context for the prompt


# 4-bit (NF4, double-quantized) config so the 8B model fits in limited VRAM;
# compute runs in fp16.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True
)


def parse_chat_string(chat_str):
    """Split a LangChain-rendered prompt string back into chat messages.

    ``ChatPromptTemplate`` renders to ``System: <system>`` followed by a line
    starting with ``Human: <user>``. This parses that text back into the
    role-tagged message list that ``tokenizer.apply_chat_template`` expects.

    Args:
        chat_str: Rendered prompt in "System: ... / Human: ..." form.

    Returns:
        list[dict]: ``[{"role": "system", ...}, {"role": "user", ...}]``,
        with both contents stripped of surrounding whitespace.

    Raises:
        ValueError: If the string does not contain the expected markers.
    """
    # Greedy first group so the split happens at the LAST "\nHuman:" marker.
    # The user turn is always rendered last, so retrieved context that happens
    # to contain a "Human:" line stays inside the system message instead of
    # truncating it (the previous non-greedy pattern split at the FIRST one).
    pattern = r'^System:\s*(.*)\nHuman:\s*(.*)$'
    match = re.match(pattern, chat_str, re.DOTALL)

    if not match:
        raise ValueError("Invalid chat string format")

    return [
        {"role": "system", "content": match.group(1).strip()},
        {"role": "user", "content": match.group(2).strip()},
    ]


class QW3(LLM):
    """LangChain ``LLM`` wrapper around a locally loaded, 4-bit quantized Qwen3 model.

    The tokenizer and model are loaded once as class attributes (at class
    creation time), so every instance shares a single copy of the weights.
    """

    # Generation parameters. Declared as class attributes so pydantic (the
    # base ``LLM`` is a pydantic model) registers them as fields.
    max_length: int = 512       # maximum number of NEW tokens to generate
    do_sample: bool = True
    temperature: float = 0.7
    top_p: float = 0.5
    history: list = []          # pydantic copies the default per instance

    tokenizer: AutoTokenizer = AutoTokenizer.from_pretrained(path,
                                                             trust_remote_code=True,
                                                             )
    model: AutoModelForCausalLM = AutoModelForCausalLM.from_pretrained(path,
                                                                       device_map="auto",
                                                                       quantization_config=quantization_config,
                                                                       trust_remote_code=True,
                                                                       # max_memory={0: "12GiB", "cpu": "14GiB"}
                                                                       ).eval()

    def __init__(self):
        super().__init__()

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for this LLM type."""
        return "qw3"

    def _call(self, prompt, stop=None, run_manager=None, **kwargs) -> str:
        """Plain chat mode: return the complete reply in one shot.

        ``run_manager`` and ``**kwargs`` are accepted to match the LangChain
        ``LLM._call`` contract (LangChain may pass them); both are ignored
        here. NOTE(review): ``stop`` sequences are also not forwarded to
        ``generate`` — confirm whether callers rely on them.
        """
        # Parse the rendered prompt back into role-tagged messages.
        messages = parse_chat_string(prompt)
        print(messages)

        # Apply the model's chat template to build the raw generation prompt.
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        # Tokenize and move the tensors to the model's device.
        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.model.device)
        # Generate the reply.
        generated_ids = self.model.generate(
            **model_inputs,
            max_new_tokens=self.max_length,
            temperature=self.temperature,
            top_p=self.top_p,
            repetition_penalty=1.1,
            do_sample=self.do_sample
        )
        # Decode only the newly generated tokens (slice off the prompt tokens).
        response = self.tokenizer.batch_decode(
            generated_ids[:, model_inputs.input_ids.shape[1]:],
            skip_special_tokens=True
        )[0]
        # Flatten newlines for single-line display (e.g. in a Gradio chat box):
        response = re.sub(r'\n\s*\n', '<br/>', response)  # blank-line runs -> <br/>
        response = re.sub(r'\n', ' ', response)  # remaining newlines -> spaces
        return response.strip()


if __name__ == '__main__':
    torch.cuda.empty_cache()
    mod = QW3()
    print("加载完成")
    # System message carries the retrieved context; user message the question.
    prompt = ChatPromptTemplate.from_messages([
        ("system", """如果下面的内容相关，根据下面的内容回答问题
                      {content}"""),
        ("user", "{input}")
    ])
    # Fan the single input string out: to the retriever (-> "content") and
    # through unchanged as the question itself (-> "input").
    runParallel = RunnableParallel({
        "content": retriever,
        "input": RunnablePassthrough()
    })
    chain = runParallel | prompt | mod
    # .stream() returns a generator — printing it directly would only show
    # "<generator object ...>" and never run the chain. Iterate it to actually
    # execute and emit the output chunks.
    for chunk in chain.stream("aaa"):
        print(chunk, end="", flush=True)
    print()
    # chainRes = chain.invoke("你知道高世缘吗？介绍一下他")
    # print(chainRes)
