"""封装大模型"""

from langchain.llms.base import LLM
from zhipuai import ZhipuAI
from langchain_core.messages.ai import AIMessage
from typing import ClassVar, List, Dict
import os

class ChatGLM4Flash(LLM):
    """LangChain ``LLM`` wrapper around ZhipuAI's GLM-4-Flash chat model.

    Maintains a per-instance conversation ``history`` — a list of
    ``{"role": ..., "content": ...}`` dicts — that is sent with every request
    so the model sees prior turns.
    """

    # Regular pydantic field (NOT ClassVar): the original ClassVar made a
    # single mutable list shared by every instance; a pydantic field default
    # is copied per instance.
    history: List[Dict[str, str]] = []
    # ZhipuAI SDK client; set in __init__.
    client: object = None

    def __init__(self, api_key: str):
        """Create the wrapper.

        Args:
            api_key: ZhipuAI API key used to authenticate the SDK client.
        """
        super().__init__()
        self.client = ZhipuAI(api_key=api_key)

    @property
    def _llm_type(self) -> str:
        # Model-type identifier reported to LangChain.
        return "GLM4Flash"

    def invoke(self, prompt: str, history: List[Dict[str, str]] = None) -> AIMessage:
        """Send ``prompt`` plus prior turns; return the model's reply.

        Args:
            prompt: The user message to send.
            history: Optional explicit conversation history; defaults to the
                instance's own ``self.history``.

        Returns:
            ``AIMessage`` whose ``content`` is the assistant's reply text.
        """
        if history is None:
            history = self.history

        # Append to the history actually being sent. (The original appended
        # to self.history even when the caller passed its own list, so the
        # appended turn was never included in the request.)
        history.append({"role": "user", "content": prompt})
        response = self.client.chat.completions.create(
            model="GLM-4-Flash",
            messages=history,
        )
        result = response.choices[0].message.content
        # Record the assistant turn so subsequent calls have full context.
        history.append({"role": "assistant", "content": result})
        return AIMessage(content=result)

    def _call(self, prompt: str, history: List[Dict[str, str]] = None):
        """LangChain hook: delegate to :meth:`invoke`.

        ``history=None`` replaces the original mutable default ``[]``, which
        was shared across calls.
        """
        return self.invoke(prompt, history)

    def stream(self, prompt: str, history: List[Dict[str, str]] = None):
        """Stream the model's reply incrementally.

        Args:
            prompt: The user message to send.
            history: Optional explicit conversation history; defaults to
                ``self.history``.

        Yields:
            One ``AIMessage`` per streamed delta chunk.
        """
        if history is None:
            history = self.history

        history.append({"role": "user", "content": prompt})
        response = self.client.chat.completions.create(
            model="GLM-4-Flash",
            messages=history,
            stream=True,
        )
        # Accumulate the streamed pieces so the complete assistant turn can
        # be recorded in history once the stream is exhausted.
        parts = []
        for chunk in response:
            result = chunk.choices[0].delta.content
            if result:
                parts.append(result)
            yield AIMessage(content=result)
        history.append({"role": "assistant", "content": "".join(parts)})


if __name__ == '__main__':
    from dotenv import load_dotenv

    # Pull API_KEY from a local .env file into the process environment.
    load_dotenv()
    api_key = os.getenv('API_KEY')
    llm = ChatGLM4Flash(api_key=api_key)

    # Example: plain (non-streaming) call
    # print(llm.invoke("你好").content)

    # Example: streaming call
    # for chunk in llm.stream("你好"):
    #     print(chunk.content,end="")