from typing import Any, List
from llama_index.core.llms import CustomLLM, ChatResponse, CompletionResponse
from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback
from llama_index.core.base.llms.types import ChatMessage, CompletionResponseGen
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import Settings
from pydantic import Field, model_validator
from zhipuai import ZhipuAI
import os
from dotenv import load_dotenv

# Load environment variables from .env
load_dotenv()

from dataclasses import dataclass

# @dataclass (a decorator from the dataclasses module) auto-generates boilerplate
# such as __init__ and __repr__ — ideal for classes that mainly store data.
@dataclass
class LLMetadata:
    """Lightweight stand-in for LlamaIndex's ``LLMMetadata``.

    Returned by ``ZhipuAILLM.metadata``; LlamaIndex's prompt helper reads
    ``context_window``/``num_output`` and the predict path reads
    ``is_chat_model``, so those stay required positional fields.

    ``system_role`` and ``is_function_calling_model`` are added with safe
    defaults to mirror the real ``LLMMetadata`` interface — some LlamaIndex
    code paths access them on the metadata object (NOTE(review): presumed
    from the LLMMetadata contract; confirm against the installed version).
    Existing positional/keyword construction is unchanged.
    """

    model_name: str          # model identifier, e.g. "glm-4"
    context_window: int      # max prompt tokens the model accepts
    is_chat_model: bool      # True => LlamaIndex prefers the chat() path
    num_output: int          # max tokens the model may generate
    # Defaulted extras for LLMMetadata interface compatibility.
    system_role: str = "system"
    is_function_calling_model: bool = False

class ZhipuAILLM(CustomLLM):
    """LlamaIndex ``CustomLLM`` wrapper around the ZhipuAI (GLM) chat API.

    Compatible with LlamaIndex 0.12.x: configuration is declared as pydantic
    class fields (not instance attributes), and the SDK client is created in
    a post-validation hook.
    """

    # Must be declared as pydantic class fields (not instance attributes).
    model: str = "glm-4"
    temperature: float = 0.7
    max_tokens: int = 2048

    # Metadata fields surfaced through the ``metadata`` property.
    context_window: int = Field(default=128000, alias="context_window")
    is_chat_model: bool = Field(default=True, alias="is_chat_model")

    # Underlying ZhipuAI SDK client. ``exclude=True`` keeps it out of
    # dict()/JSON serialization of the model.
    client: Any = Field(default=None, exclude=True)

    @model_validator(mode="after")
    def init_client(self):
        """Create the ZhipuAI client after pydantic validation.

        Reads the API key from the ZHIPUAI_API_KEY environment variable
        (populated from .env by load_dotenv at import time).
        """
        self.client = ZhipuAI(api_key=os.getenv("ZHIPUAI_API_KEY"))
        return self

    @property
    def metadata(self) -> LLMetadata:
        """Model metadata consumed by LlamaIndex's prompt/response machinery."""
        return LLMetadata(
            model_name=self.model,
            context_window=self.context_window,
            is_chat_model=self.is_chat_model,
            num_output=self.max_tokens,
        )

    @llm_completion_callback()
    def complete(self, prompt: str, **kwargs) -> CompletionResponse:
        """Single-turn completion: sends ``prompt`` as one user message.

        ``temperature``/``max_tokens`` may be overridden per call via kwargs.
        """
        response = self.client.chat.completions.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}],
            temperature=kwargs.get("temperature", self.temperature),
            max_tokens=kwargs.get("max_tokens", self.max_tokens),
        )
        return CompletionResponse(text=response.choices[0].message.content)

    @llm_completion_callback()
    def stream_complete(self, prompt: str, **kwargs) -> CompletionResponseGen:
        """Streaming completion.

        Yields ``CompletionResponse`` objects following the LlamaIndex
        convention: ``delta`` is the newly received chunk and ``text`` is the
        cumulative text so far. (Bug fix: the original put only the chunk in
        ``text``, so the final streamed response contained just the last
        fragment instead of the whole completion.)
        """
        response = self.client.chat.completions.create(
            model=self.model,
            messages=[{"role": "user", "content": prompt}],
            stream=True,
            temperature=kwargs.get("temperature", self.temperature),
            max_tokens=kwargs.get("max_tokens", self.max_tokens),
        )
        text = ""
        for chunk in response:
            delta = chunk.choices[0].delta.content or ""
            text += delta
            yield CompletionResponse(text=text, delta=delta)

    @llm_chat_callback()
    def chat(self, messages: List[ChatMessage], **kwargs) -> ChatResponse:
        """Multi-turn chat.

        Converts LlamaIndex ``ChatMessage`` objects into the ZhipuAI dict
        format ({"role": ..., "content": ...}) and wraps the reply back into
        a ``ChatResponse`` with the assistant role.
        """
        formatted_messages = [
            {"role": msg.role.value, "content": msg.content} for msg in messages
        ]
        response = self.client.chat.completions.create(
            model=self.model,
            messages=formatted_messages,
            temperature=kwargs.get("temperature", self.temperature),
            max_tokens=kwargs.get("max_tokens", self.max_tokens),
        )
        return ChatResponse(
            message=ChatMessage(
                role="assistant",
                content=response.choices[0].message.content,
            )
        )

# Configure the embedding model (local BGE model, CPU inference).
# The path can be overridden via EMBED_MODEL_PATH so the script is not tied
# to one machine's directory layout; the default preserves the original path.
model_name = os.getenv(
    "EMBED_MODEL_PATH", "D:/ideaSpace/MyPython/models/bge-small-zh-v1.5"
)
embed_model = HuggingFaceEmbedding(
    model_name=model_name,
    device="cpu",
)

# Initialize the ZhipuAI GLM-4 LLM.
llm = ZhipuAILLM(model="glm-4", temperature=0.5)

# Global LlamaIndex settings: use the custom ZhipuAILLM and local embeddings.
Settings.llm = llm
Settings.embed_model = embed_model
Settings.chunk_size = 512    # text chunk size
Settings.chunk_overlap = 50  # overlap between adjacent chunks

# Load documents (expected under ./data).
documents = SimpleDirectoryReader("./data").load_data()

# Build the vector index over the loaded documents.
index = VectorStoreIndex.from_documents(documents)

# Create the query engine (retrieve the 3 most similar chunks).
query_engine = index.as_query_engine(similarity_top_k=3)

# Ask a question and print the retrieval-augmented answer.
response = query_engine.query("量子计算的主要挑战是什么？")
print(f"答案：{response}\n")

# Show the retrieved source passages (first 300 chars of each).
for i, node in enumerate(response.source_nodes):
    print(f"参考文档 {i+1}:\n{node.text[:300]}...\n")