from typing import Optional, Any

import torch
from transformers import AutoTokenizer, AutoModel
from langchain.llms.base import LLM
from langchain_core.prompts import ChatPromptTemplate

# Free any cached GPU memory left over from a previous run before the large
# model is loaded below (class-definition time in QW3). Safe no-op when CUDA
# is unavailable or uninitialized.
torch.cuda.empty_cache()

# Local filesystem path of the pretrained checkpoint to load.
path = r'M:\moudels\chatglm3\chatglm3-6b'


# Alternative checkpoint (Qwen3-14B) — swap in by uncommenting:
# path = r'M:\moudels\Qwen314B\Qwen3-14B'


# Earlier module-level loading code, superseded by the class-level
# tokenizer/model fields inside QW3 below:
# tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=True)
#
# model = AutoModel.from_pretrained(path, trust_remote_code=True, device_map="auto", load_in_4bit=True,
#                                   bnb_4bit_compute_dtype=torch.float16)
# model = model.eval()

class QW3(LLM):
    """LangChain ``LLM`` wrapper around a locally loaded ChatGLM3-6B model.

    ``LLM`` is a Pydantic model, so every attribute below must be declared
    as a class-level field (plain instance attributes set in ``__init__``
    would be rejected by Pydantic).

    The tokenizer and model are loaded once, at class-definition time —
    importing this module therefore performs heavy disk/GPU I/O.
    """

    # --- generation parameters (Pydantic fields) ---
    max_length: int = 2048      # maximum total sequence length (prompt + reply)
    do_sample: bool = True      # sample instead of greedy decoding
    temperature: float = 0.7    # sampling temperature
    top_p: float = 0.5          # nucleus-sampling threshold
    # Conversation history carried across calls; each element is the newest
    # (query, response) pair returned by the model.
    history: list = []
    # NOTE(review): versions observed to work with ChatGLM3's custom code:
    #    transformers==4.30.2, accelerate==0.24.0
    tokenizer: AutoTokenizer = AutoTokenizer.from_pretrained(
        path,
        trust_remote_code=True,
        use_fast=True,
    )
    model: AutoModel = AutoModel.from_pretrained(
        path,
        trust_remote_code=True,
        device_map="auto",
        torch_dtype="auto",
        load_in_4bit=True,                     # 4-bit quantized weights (bitsandbytes)
        bnb_4bit_compute_dtype=torch.float16,  # compute in fp16
    ).eval()

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses to tag this LLM implementation."""
        return "qw3"

    def _call(self, prompt: str, stop: Optional[list] = None) -> str:
        """Run one non-streaming chat turn and record it in ``self.history``.

        Args:
            prompt: The user message to send to the model.
            stop: Ignored; accepted only for LangChain interface compatibility.

        Returns:
            The model's full response text.
        """
        response, history = self.model.chat(
            self.tokenizer,
            prompt,
            history=self.history,
            do_sample=self.do_sample,
            max_length=self.max_length,
            temperature=self.temperature,
        )
        print("普通对话模式:")
        print("response:  ", response)
        print("self.history:  ", self.history)
        # Keep only the newest (query, response) pair. Guarded so an empty
        # returned history cannot raise IndexError.
        if history:
            self.history.append(history[-1])
        return response

    def stream_chat(self, prompt: str):
        """Stream one chat turn, yielding the accumulated partial response.

        Args:
            prompt: The user message to send to the model.

        Yields:
            The response text as it grows, once per generation step.
        """
        print("流对话模式:")
        # BUG FIX: the original initialized ``history = ""`` and indexed
        # ``history[-1]`` unconditionally after the loop, so an empty stream
        # raised IndexError on a string. Use a list and guard the append.
        history: list = []
        response = ""
        for response, history in self.model.stream_chat(
            self.tokenizer,
            prompt,
            history=self.history,
            temperature=self.temperature,  # randomness (0-1)
            top_p=self.top_p,              # nucleus-sampling parameter
            max_length=self.max_length,    # maximum generation length
            repetition_penalty=1.2,        # repetition penalty factor
        ):
            # Yield the response accumulated so far on every step.
            yield response
        print("response:  ", response)
        print("self.history:  ", self.history)
        if history:
            self.history.append(history[-1])


if __name__ == '__main__':
    # Smoke test: construct the wrapper (triggers model load) and run one turn.
    llm = QW3()
    print("加载完成")
    answer = llm.invoke("你好")
    print(answer)
    # Multi-turn example (history is carried across invocations):
    # print(llm.invoke("西安的小吃"))
    # print(llm.invoke("你刚刚说的是哪的小吃"))
    # Streaming example:
    # for chunk in llm.stream_chat("你好"): pass
    # for chunk in llm.stream_chat("西安的小吃"): pass
    # for chunk in llm.stream_chat("你刚刚说的是哪的小吃"): pass

    # Prompt-template example (LangChain pipeline):
    # prompt = ChatPromptTemplate.from_messages([
    #     ("system", "你是一个面馆点餐机器人"),
    #     ("user", "{input}"),
    #
    # ])
    # chain = prompt | llm
    # chain.invoke({"input": "你是谁"})