import base64
import mimetypes

import requests
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_core.outputs import ChatResult, ChatGeneration
from pydantic import Field


def image_to_base64(image_path):
    """Read a local image file and return it as a base64 data URL.

    The MIME type is guessed from the file extension via
    ``mimetypes.guess_type`` so that PNG/GIF/WebP files are no longer
    mislabeled as JPEG. When the type cannot be determined (or is not an
    image type), falls back to ``image/jpeg`` — the original hard-coded
    default — so existing .jpg callers see identical output.

    Args:
        image_path: Filesystem path to the image file.

    Returns:
        A ``data:<mime>;base64,...`` URL string.

    Raises:
        OSError: If the file cannot be opened or read.
    """
    mime_type, _ = mimetypes.guess_type(image_path)
    if not mime_type or not mime_type.startswith('image/'):
        mime_type = 'image/jpeg'
    with open(image_path, "rb") as image_file:
        base64_data = base64.b64encode(image_file.read()).decode('utf-8')
    return f"data:{mime_type};base64,{base64_data}"


def _format_messages(messages):
    """将 LangChain 消息格式转换为智谱 API 格式"""
    formatted = []
    for msg in messages:
        if hasattr(msg, 'type'):
            # 处理多模态消息
            if hasattr(msg, 'content') and isinstance(msg.content, list):
                content_list = []
                for content_item in msg.content:
                    if content_item.get('type') == 'text':
                        content_list.append({
                            "type": "text",
                            "text": content_item['text']
                        })
                    elif content_item.get('type') == 'image_url':
                        image_url = content_item['image_url']['url']

                        if image_url.startswith('file://') or image_url.startswith('/'):
                            # 处理本地文件路径
                            image_path = image_url.replace('file://', '')
                            base64_data = image_to_base64(image_path)
                            content_list.append({
                                "type": "image_url",
                                "image_url": {
                                    "url": base64_data
                                }
                            })
                        elif image_url.startswith('data:image'):
                            # 已经是 base64 格式
                            content_list.append({
                                "type": "image_url",
                                "image_url": {
                                    "url": image_url
                                }
                            })
                        else:
                            # 处理网络 URL
                            try:
                                response = requests.get(image_url)
                                response.raise_for_status()
                                base64_data = base64.b64encode(response.content).decode('utf-8')
                                content_list.append({
                                    "type": "image_url",
                                    "image_url": {
                                        "url": f"data:image/jpeg;base64,{base64_data}"
                                    }
                                })
                            except Exception as e:
                                print(f"图片处理失败: {e}")
                                continue
                formatted.append({
                    "role": "user",  # 多模态消息通常是用户消息
                    "content": content_list
                })
            else:
                # 处理普通文本消息
                formatted.append({
                    "role": "user" if msg.type == "human" else "assistant",
                    "content": msg.content
                })
    return formatted


class GLM45V(BaseChatModel):
    """LangChain chat-model wrapper for the Zhipu AI GLM-4.5V multimodal API."""

    # API key used for Bearer authentication against the Zhipu endpoint.
    api_key: str = Field(..., description="智谱 AI API 密钥")
    # Chat-completions endpoint; override for proxies or regional hosts.
    base_url: str = Field(default="https://open.bigmodel.cn/api/paas/v4/chat/completions")

    def __init__(self, api_key=None, **kwargs):
        # Allow positional construction (GLM45V("key")) while delegating
        # validation to the pydantic/BaseChatModel initializer.
        super().__init__(api_key=api_key, **kwargs)

    def _generate(self, messages, stop=None, run_manager=None, **kwargs):
        """Run a single chat completion.

        Any exception from the API call (HTTP error, timeout, malformed
        response) is converted into an AIMessage describing the error
        rather than being propagated, so callers always get a ChatResult.
        """
        try:
            content = self._call_glm4v_api(messages)
        except Exception as e:
            content = f"API调用错误: {str(e)}"
        message = AIMessage(content=content)
        return ChatResult(generations=[ChatGeneration(message=message)])

    def _call_glm4v_api(self, messages):
        """POST the formatted messages to the GLM endpoint.

        Returns:
            The assistant reply text from the first choice.

        Raises:
            requests.HTTPError: On a non-2xx response status.
            requests.Timeout: If the server does not answer within 60s.
        """
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}"
        }
        payload = {
            "model": "glm-4.5v",
            # Convert LangChain messages to the Zhipu API message schema.
            "messages": _format_messages(messages),
        }
        # timeout added so a stalled connection cannot hang the caller
        # forever; the failure surfaces via _generate's error handling.
        response = requests.post(self.base_url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for logging and serialization."""
        return "glm-4.5v"