import os
from langchain.llms.base import LLM
from zhipuai import ZhipuAI
from langchain_core.messages.ai import AIMessage
from typing import ClassVar


class ChatGLM4(LLM):
    """LangChain LLM wrapper around ZhipuAI's glm-4-flash chat model.

    Keeps a rolling conversation history (bounded by ``max_history_length``)
    and supports both blocking (`invoke`) and streaming (`stream`) calls.
    """

    # NOTE(review): history is a ClassVar, so the conversation log is shared
    # by every ChatGLM4 instance in the process — confirm this is intentional.
    # Also note that assistant replies are never appended here, so the model
    # only ever sees user turns unless the caller passes them via `history`.
    history: ClassVar[list] = []
    client: object = None
    max_history_length: ClassVar[int] = 10  # 设置对话历史的最大长度，可根据实际情况调整

    def __init__(self):
        """Build the ZhipuAI client from the ``ZP_API_KEY`` environment variable.

        Raises:
            ValueError: if ``ZP_API_KEY`` is not set.
        """
        super().__init__()
        zhipuai_api_key = os.getenv('ZP_API_KEY')
        if zhipuai_api_key is None:
            # Fixed: the message previously named ZHUPU_API_KEY, but the code
            # actually reads ZP_API_KEY — keep the two consistent.
            raise ValueError("请设置ZP_API_KEY环境变量以提供智谱AI的API密钥。")
        self.client = ZhipuAI(api_key=zhipuai_api_key)

    @property
    def _llm_type(self):
        """Identifier LangChain uses for this LLM type."""
        return "ChatGLM4"

    def _truncate_history(self):
        """
        当对话历史超过最大长度时，截断历史记录，只保留最新的部分

        Uses in-place slice assignment so the shared ClassVar list is mutated
        rather than shadowed by a new instance attribute (rebinding
        ``self.history`` would also trip the pydantic base class).
        """
        if len(self.history) > self.max_history_length:
            self.history[:] = self.history[-self.max_history_length:]

    def invoke(self, prompt, config=None, history=None):
        """Send ``prompt`` (plus accumulated history) and return the full reply.

        Args:
            prompt: a string, or any object exposing ``to_string()``
                (e.g. a LangChain prompt value).
            config: unused; kept for interface compatibility.
            history: optional extra messages to merge into the conversation.

        Returns:
            AIMessage wrapping the model's reply text.
        """
        if history is None:
            history = []
        if not isinstance(prompt, str):
            prompt = prompt.to_string()

        # Fixed: the caller-supplied history was previously ignored here,
        # although stream() merged it — merge it for consistency.
        self.history.extend(history)
        self.history.append({"role": "user", "content": prompt})
        self._truncate_history()

        try:
            response = self.client.chat.completions.create(
                model="glm-4-flash",
                messages=self.history
            )
            result = response.choices[0].message.content
            return AIMessage(content=result)
        except Exception as e:
            print(f"调用ChatGLM4模型时出错: {e}")
            raise

    def _call(self, prompt, config=None, history=None):
        """LangChain's internal entry point; delegates to :meth:`invoke`."""
        # Fixed: history was previously passed positionally into invoke()'s
        # `config` slot; pass both arguments by keyword.
        return self.invoke(prompt, config=config, history=history)

    def stream(self, prompt, config=None, history=None):
        """Send ``prompt`` and yield the reply incrementally as text deltas.

        Args/merging semantics match :meth:`invoke`; yields each chunk's
        delta content string as it arrives.
        """
        if history is None:
            history = []
        if not isinstance(prompt, str):
            prompt = prompt.to_string()

        self.history.extend(history)
        self.history.append({"role": "user", "content": prompt})
        self._truncate_history()

        try:
            response = self.client.chat.completions.create(
                model="glm-4-flash",
                messages=self.history,
                stream=True
            )
            for chunk in response:
                yield chunk.choices[0].delta.content
        except Exception as e:
            print(f"以流方式调用ChatGLM4模型时出错: {e}")
            raise

def main():
    """Demo: blocking calls with and without explicit history, then streaming."""
    # 初始化ChatGLM4实例
    llm = ChatGLM4()

    # 第一次调用invoke方法获取完整响应
    prompt1 = "今天天气怎么样？"
    response1 = llm.invoke(prompt1)
    print(f"完整响应（第一次提问）: {response1.content}")

    # 第二次调用invoke方法，带上第一次的对话历史
    prompt2 = "那明天的天气呢？"
    # Fixed: the model reply must use role "assistant" — "answer" is not a
    # valid ZhipuAI chat role and the API rejects/ignores it.
    history_for_second_call = [{"role": "user", "content": prompt1},
                               {"role": "assistant", "content": response1.content}]
    response2 = llm.invoke(prompt2, history=history_for_second_call)
    print(f"完整响应（第二次提问）: {response2.content}")

    # 第三次调用：无额外历史
    response3 = llm.invoke("请讲1个减肥的笑话吧")
    print(f"完整响应（第三次提问）: {response3.content}")

    # 调用stream方法以流的方式获取响应内容；先清空共享历史
    llm.history.clear()
    print(f"流式响应（第四次提问）: ")
    for delta in llm.stream("如何鼓励自己减肥！"):
        print(delta, end="")


# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()