import os
from typing import Any, Dict, List

from dotenv import load_dotenv
from llama_index.core.base.llms.types import (
    ChatMessage,
    CompletionResponse,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.core.llms import ChatResponse, CustomLLM
from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback
from pydantic import PrivateAttr
from zhipuai import ZhipuAI

# 加载环境变量
load_dotenv()

class ZhipuAILLM(CustomLLM):
    """LlamaIndex ``CustomLLM`` wrapper around the ZhipuAI GLM-4 chat API.

    The API key is read from the ``ZHIPUAI_API_KEY`` environment variable
    (typically loaded from a ``.env`` file via ``load_dotenv``).
    """

    # Pydantic-validated configuration fields
    model: str = "glm-4"        # ZhipuAI model identifier sent with every request
    temperature: float = 0.7    # sampling temperature applied to all calls

    # Private attribute so pydantic does not attempt to validate the SDK client
    _client: Any = PrivateAttr()

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # NOTE(review): if ZHIPUAI_API_KEY is unset this passes None through;
        # the SDK will fail on the first request rather than here.
        self._client = ZhipuAI(api_key=os.getenv("ZHIPUAI_API_KEY"))

    @property
    def metadata(self) -> LLMMetadata:
        """Capabilities advertised to llama_index.

        BUG FIX: the base class and llama_index internals access this by
        attribute (e.g. ``metadata.context_window``), so it must be an
        ``LLMMetadata`` instance — a plain dict raises ``AttributeError``.
        """
        return LLMMetadata(
            model_name=self.model,
            context_window=128000,
            is_chat_model=True,
        )

    @llm_completion_callback()
    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        """Single-turn, non-streaming completion of *prompt*.

        ``max_tokens`` may be overridden via kwargs (defaults to 1024).
        """
        response = self._client.chat.completions.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}],
            temperature=self.temperature,
            max_tokens=kwargs.get("max_tokens", 1024),
        )
        return CompletionResponse(text=response.choices[0].message.content)

    @llm_completion_callback()
    def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
        """Stream a completion, yielding one ``CompletionResponse`` per chunk.

        Follows the llama_index streaming convention: ``text`` accumulates the
        full output so far, ``delta`` carries only the new fragment.
        """
        response = self._client.chat.completions.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}],
            temperature=self.temperature,  # consistent with complete()/chat()
            stream=True,
            **kwargs,
        )
        text = ""
        for chunk in response:
            # The final chunk's delta content can be None — normalize to "".
            delta = chunk.choices[0].delta.content or ""
            text += delta
            yield CompletionResponse(text=text, delta=delta)

    @llm_chat_callback()
    def chat(self, messages: List[ChatMessage], **kwargs: Any) -> ChatResponse:
        """Multi-turn chat: convert llama_index messages to the ZhipuAI wire format."""
        payload = [
            # MessageRole is a str-enum; .value makes the plain-string intent explicit.
            {"role": msg.role.value, "content": msg.content}
            for msg in messages
        ]
        response = self._client.chat.completions.create(
            model=self.model,
            messages=payload,
            temperature=self.temperature,
            **kwargs,
        )
        return ChatResponse(
            message=ChatMessage(
                role="assistant",
                content=response.choices[0].message.content,
            )
        )

    async def acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        """Simplified async variant.

        Delegates to the blocking ``complete``; acceptable for scripts, but it
        will block the event loop in genuinely async applications.
        """
        return self.complete(prompt, **kwargs)

def main() -> None:
    """Demo driver: exercise completion, streaming, and chat against GLM-4.

    Requires the ZHIPUAI_API_KEY environment variable to be set.
    """
    llm = ZhipuAILLM(model="glm-4", temperature=0.5)

    # Synchronous completions
    print(llm.complete("解释量子纠缠"))
    print(llm.complete("量子计算的主要挑战是什么？"))

    # Streaming: print each incremental fragment as it arrives.
    for chunk in llm.stream_complete("写一首关于AI的诗"):
        print(chunk.delta, end="", flush=True)

    # Chat mode with a short multi-turn history
    messages = [
        ChatMessage(role="user", content="你好"),
        ChatMessage(role="assistant", content="您好！"),
        ChatMessage(role="user", content="量子比特是什么？"),
    ]
    print(llm.chat(messages))


# Guard so importing this module does not fire paid API calls as a side effect.
if __name__ == "__main__":
    main()