from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModel
import gradio as gr
import torch
import transformers

print(torch.__version__)
print(torch.cuda.is_available())
print(transformers.__version__)

# 清除gpu
torch.cuda.empty_cache()

path = r'M:\moudels\chatglm3\chatglm3-6b'

tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=True)

# 这个错误是因为你同时使用了 device_map="auto" 和 .cuda() 方法，这两者是冲突的。device_map="auto" 会自动将模型的不同层分配到不同的设备上
# （可能包括 CPU 和 GPU），而 .cuda() 尝试将整个模型移动到 GPU，这会导致冲突。
# model = AutoModel.from_pretrained(path, trust_remote_code=True, device_map="auto").half().cuda()  error
# model = AutoModel.from_pretrained(path, trust_remote_code=True, device_map="auto").half()
model = AutoModel.from_pretrained(path, trust_remote_code=True, device_map="auto", load_in_4bit=True,
                                  bnb_4bit_compute_dtype=torch.float16)
model = model.eval()
#  https://pan.baidu.com/s/1l_9cT32puG52CvT0zhIuXw 提取码: 1234

CSS = """
*{
    font-family: bold STKaiti, Kaiti SC, Kaiti, BlinkMacSystemFont, Helvetica Neue, PingFang SC, Microsoft YaHei, Source Han Sans SC, Noto Sans CJK SC, WenQuanYi Micro Hei, Arial, sans-serif;
    font-size: 20px;
    color: rgb(147 102 0);
}
"""
dome = gr.Blocks(css=CSS)


with dome:
    # 1. 基础对话 API (chat方法)
    def chat_gsy(mes, current_his):
        response, history = model.chat(
            tokenizer,
            # "你好，请介绍下ChatGLM3",
            mes,
            history=current_his[-1:]
        )
        # history_all.append(current_his)
        print("普通对话模式:")
        print("response:  ", response)
        print("history:  ", history)
        print("-" * 50)
        return response

        # 2. 流式对话 API (stream_chat方法)

    # 2. 流式对话
    def chat_stream_gsy(mes, current_his):
        print("流式对话模式:")
        response = ""
        # history = current_his
        current_his.append({"role": "system", "content": "你扮演郭德纲"})
        for response, history in model.stream_chat(tokenizer,
                                                   mes,
                                                   history=current_his[-1:],
                                                   temperature=0.8,  # 控制随机性 (0-1)
                                                   top_p=0.9,  # 核采样参数
                                                   max_length=2048,  # 最大生成长度
                                                   repetition_penalty=1.2  # 重复惩罚系数
                                                   ):
            # 实时打印新增内容
            yield response
            # print(new_text, end="", flush=True)
        print("response:  ", response)
        print("current_his:  ", current_his)
        # print("-" * 50)

    # 3. 多轮对话示例
    def multi_turn_chat(questions):
        global history
        questions = [
            "什么是机器学习？",
            "它和深度学习有什么关系？",
            "能给我一个实际应用的例子吗？"
        ]
        print("多轮对话示例:")
        for q in questions:
            response, history = model.chat(
                tokenizer,
                q,
                history=history
            )
            print(f"用户: {q}")
            print(f"助手: {response}\n")
        print("-" * 50)

    # 4. 带参数控制的对话
    def parameterized_chat():
        response, _ = model.chat(
            tokenizer,
            "用诗意的语言描述星空",
            history=[],
            temperature=0.8,  # 控制随机性 (0-1)
            top_p=0.9,  # 核采样参数
            max_length=2048,  # 最大生成长度
            repetition_penalty=1.2  # 重复惩罚系数
        )
        print("参数化对话:")
        print(response)
        print("-" * 50)

        # chat_out = gr.Chatbot()
        # textBox_in = gr.Textbox(label='in')
        # textBox_in.submit(fn=chat, inputs=[textBox_in, chat_out], outputs=[textBox_in, chat_out])

    chat_interface = gr.ChatInterface(
        fn=chat_stream_gsy,
        type="messages",
        title="ChatGLM3-6B-LOCAL",
        description="与ChatGLM3-6B模型进行实时对话",
        submit_btn="提交",
        stop_btn="撤销",
    )

if __name__ == '__main__':
    pass
    dome.launch()
    # chat_stream_gsy("你好", [])
