import base64

from langchain.llms.base import LLM
from zhipuai import ZhipuAI
from langchain_core.messages.ai import AIMessage
import os
from typing import List


class ChatGLM4(LLM):
    """LangChain ``LLM`` wrapper around ZhipuAI's GLM-4 chat-completion API.

    ``invoke`` performs a single blocking completion; ``stream`` yields
    incremental text chunks from the streaming endpoint.
    """

    # Conversation history in the ZhipuAI message format
    # ({"role": ..., "content": ...}); callers usually pass their own list.
    history: List[dict] = []
    # ZhipuAI client; declared as a (pydantic) field so assignment in
    # __init__ is allowed, populated per instance.
    client: ZhipuAI = None

    def __init__(self):
        super().__init__()
        # Prefer the environment variable; the literal fallback preserves the
        # previous behavior. NOTE(review): this API key is committed to source
        # control — it should be rotated and the fallback removed.
        zhipuai_api_key = os.getenv(
            'ZHIPU_API_KEY',
            "01bc3258797451e5c530708830a8e2b4.ecfQiHRZweOfwVF1",
        )
        self.client = ZhipuAI(api_key=zhipuai_api_key)

    @property
    def _llm_type(self) -> str:
        return "ChatGLM4"

    def invoke(self, prompt: str, config: dict = None, history: List[dict] = None) -> AIMessage:
        """Send ``prompt`` (appended to ``history``) to GLM-4 and return the reply.

        ``prompt`` may be a LangChain prompt value; anything that is not a
        ``str`` is converted via its ``to_string()`` method. ``history`` is
        mutated in place (the user turn is appended). ``config`` is accepted
        for interface compatibility but currently unused.
        """
        if config is None:
            config = {}
        if history is None:
            history = []
        if not isinstance(prompt, str):
            prompt = prompt.to_string()
        history.append({"role": "user", "content": prompt})
        response = self.client.chat.completions.create(
            model="glm-4",
            messages=history
        )
        result = response.choices[0].message.content
        return AIMessage(content=result)

    def _call(self, prompt: str, config: dict = None, history: List[dict] = None) -> AIMessage:
        """LangChain entry point; delegates to :meth:`invoke`.

        Bug fix: ``history`` was previously passed positionally into the
        ``config`` slot, so the caller's history was silently dropped.
        """
        return self.invoke(prompt, config, history)

    def stream(self, prompt: str, config: dict = None, history: List[dict] = None):
        """Yield response text chunks from the GLM-4 streaming endpoint."""
        if config is None:
            config = {}
        if history is None:
            history = []
        if not isinstance(prompt, str):
            prompt = prompt.to_string()
        history.append({"role": "user", "content": prompt})
        response = self.client.chat.completions.create(
            model="glm-4",
            messages=history,
            stream=True
        )
        for chunk in response:
            yield chunk.choices[0].delta.content

# Multimodal variant: accepts images (and, per the signature, videos).
class ChatGLM4v(LLM):
    """LangChain ``LLM`` wrapper around ZhipuAI's multimodal GLM-4V model.

    Accepts an optional base64/URL image alongside the text prompt;
    falls back to text-only messages when no image is supplied.
    """

    # Conversation history in the ZhipuAI message format.
    history: List[dict] = []
    # ZhipuAI client; declared as a (pydantic) field so assignment in
    # __init__ is allowed, populated per instance.
    client: ZhipuAI = None

    def __init__(self):
        super().__init__()
        # Prefer the environment variable; the literal fallback preserves the
        # previous behavior. NOTE(review): this API key is committed to source
        # control — it should be rotated and the fallback removed.
        zhipuai_api_key = os.getenv(
            'ZHIPU_API_KEY',
            "01bc3258797451e5c530708830a8e2b4.ecfQiHRZweOfwVF1",
        )
        self.client = ZhipuAI(api_key=zhipuai_api_key)

    @property
    def _llm_type(self) -> str:
        return "glm-4v-plus"

    def invoke(self, prompt: str, img_base: str = None, config: dict = None, history: List[dict] = None) -> AIMessage:
        """Send ``prompt`` (with optional image ``img_base``) to GLM-4V.

        ``img_base`` now defaults to ``None`` (backward-compatible
        generalization): when omitted, a text-only message is sent instead of
        requiring every caller to provide an image.
        """
        if config is None:
            config = {}
        if history is None:
            history = []
        if not isinstance(prompt, str):
            prompt = prompt.to_string()
        if img_base is not None:
            content = [
                {"type": "image_url", "image_url": {"url": img_base}},
                {"type": "text", "text": prompt},
            ]
        else:
            content = [{"type": "text", "text": prompt}]
        history.append({"role": "user", "content": content})
        response = self.client.chat.completions.create(
            model="glm-4v-plus",
            messages=history
        )
        result = response.choices[0].message.content
        return AIMessage(content=result)

    def _call(self, prompt: str, config: dict = None, history: List[dict] = None) -> AIMessage:
        """LangChain entry point; delegates to :meth:`invoke` (text-only).

        Bug fix: ``history`` was previously passed positionally into the
        required ``img_base`` slot, which would have sent the history list
        as an image URL.
        """
        return self.invoke(prompt, img_base=None, config=config, history=history)

    def stream(self, prompt: str, img_base: str = None, video_base: str = None, config: dict = None, history: List[dict] = None):
        """Yield response text chunks from the GLM-4V streaming endpoint.

        Bug fix: the original ``if not isinstance(...)/elif/else`` chain
        skipped appending the message entirely whenever ``prompt`` was not a
        plain ``str``; prompt conversion is now separated from the
        image/text branch. NOTE(review): ``video_base`` is accepted but not
        forwarded to the API — confirm the intended video message format
        before wiring it up.
        """
        if config is None:
            config = {}
        if history is None:
            history = []
        if not isinstance(prompt, str):
            prompt = prompt.to_string()
        if img_base is not None:
            print("图片模式")
            history.append({"role": "user", "content": [{"type": "image_url", "image_url": {"url": img_base}}, {"type": "text", "text": prompt}]})
        else:
            print("普通模式")
            history.append({"role": "user", "content": [{"type": "text", "text": prompt}]})

        response = self.client.chat.completions.create(
            model="glm-4v-plus",
            messages=history,
            stream=True
        )
        for chunk in response:
            yield chunk.choices[0].delta.content
