import os
from dotenv import load_dotenv
from pprint import pprint

from langchain.agents.agent_toolkits import (
    create_retriever_tool,
    create_conversational_retrieval_agent
)
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI  # 或 Tongyi
from langchain.llms import Tongyi
from langchain.schema import Document
from langchain.chains import RetrievalQA

from transformers import GPT2TokenizerFast


# 初始化智谱 embedding
from typing import Any, Dict, List, Optional

from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, Field, model_validator
import os
from zhipuai import ZhipuAI

class ZhipuAIEmbeddings(BaseModel, Embeddings):
    """ZhipuAI embedding model integration.

    Setup:

        To use, you should have the ``zhipuai`` python package installed, and the
        environment variable ``ZHIPUAI_API_KEY`` set with your API KEY
        (this is the variable actually read by ``validate_environment`` below).

        More instructions about ZhipuAI Embeddings are available at
        https://open.bigmodel.cn/dev/api#vector

        .. code-block:: bash

            pip install -U zhipuai
            export ZHIPUAI_API_KEY="your-api-key"

    Key init args — completion params:
        model: str
            Name of the ZhipuAI embedding model to use.
        api_key: str
            Automatically inferred from env var ``ZHIPUAI_API_KEY`` if not provided.
        dimensions: Optional[int]
            Desired output dimensionality; only supported by ``embedding-3``
            and later models.

    Instantiate:

        .. code-block:: python

            embed = ZhipuAIEmbeddings(
                model="embedding-2",
                # api_key="...",
            )

    Embed single text:
        .. code-block:: python

            embed.embed_query("The meaning of life is 42")
            # -> [-0.003832892, 0.049372625, -0.035413884, ...]

    Embed multiple texts:
        .. code-block:: python

            embed.embed_documents(["This is a test query1.", "This is a test query2."])
            # -> [[0.0083934665, ...], [-0.02713102, ...]]
    """  # noqa: E501

    client: Any = Field(default=None, exclude=True)  #: :meta private:
    # Name of the ZhipuAI embedding model to call.
    model: str = Field(default="embedding-2")
    # API key; inferred from env var ``ZHIPUAI_API_KEY`` when not provided.
    api_key: str
    # Output dimensionality; only supported by ``embedding-3`` and later models.
    dimensions: Optional[int] = None

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Resolve the API key and construct the ZhipuAI client.

        Args:
            values: Raw field values passed to the model constructor.

        Returns:
            The values dict with ``api_key`` and ``client`` populated.

        Raises:
            ImportError: If the ``zhipuai`` package is not installed.
        """
        # Prefer an explicit ``api_key`` kwarg; fall back to ZHIPUAI_API_KEY.
        values["api_key"] = get_from_dict_or_env(values, "api_key", "ZHIPUAI_API_KEY")
        try:
            from zhipuai import ZhipuAI

            values["client"] = ZhipuAI(api_key=values["api_key"])
        except ImportError as err:
            raise ImportError(
                "Could not import zhipuai python package. "
                "Please install it with `pip install zhipuai`."
            ) from err
        return values

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query text via the ZhipuAI embeddings API.

        Args:
            text: The text to embed.

        Returns:
            The embedding vector for ``text``.
        """
        # Delegate to the batch endpoint and unwrap the single result.
        return self.embed_documents([text])[0]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of documents via the ZhipuAI embeddings API.

        Args:
            texts: The list of texts to embed.

        Returns:
            One embedding (a list of floats) per input text, in input order.
        """
        # Only forward ``dimensions`` when explicitly set — it is unsupported
        # on models older than embedding-3.
        if self.dimensions is not None:
            resp = self.client.embeddings.create(
                model=self.model,
                input=texts,
                dimensions=self.dimensions,
            )
        else:
            resp = self.client.embeddings.create(model=self.model, input=texts)
        return [item.embedding for item in resp.data]



# Tongyi LLM subclass that carries a locally loaded tokenizer.
class CustomTongyi(Tongyi):
    """Tongyi LLM that additionally holds a GPT-2 fast tokenizer loaded
    from a local path, attached as ``self.tokenizer``."""

    class Config:
        # Permit setting attributes that are not declared pydantic fields
        # (needed for ``self.tokenizer`` below).
        extra = 'allow'

    def __init__(self, tokenizer_path, **kwargs):
        super().__init__(**kwargs)
        # Attach the tokenizer after base init; relies on extra='allow'.
        self.tokenizer = GPT2TokenizerFast.from_pretrained(tokenizer_path)

# Load environment variables (.env) so API keys are available via os.getenv.
load_dotenv()

# Embedding function backed by ZhipuAI's hosted embedding model.
# NOTE: the ``dimensions`` parameter is only supported by embedding-3 and
# later models, so it must NOT be passed together with "embedding-2"
# (the API would reject the request). It is therefore omitted here.
embedding = ZhipuAIEmbeddings(
    model="embedding-2",
    api_key=os.getenv("ZHIPU_API_KEY"),
)

# Initialize the Chroma vector store (persisted on disk).
persist_directory = "ch16_db"  # ensure this path exists or can be created
vectorstore = Chroma(
    persist_directory=persist_directory,
    embedding_function=embedding,
    collection_name="products"
)

# Documents and metadata to add to the store.
documents = ["Galaxy S21", "iPhone 13", "MacBook Pro"]
metadatas = [
    {"category": "手机", "price": 799.99},
    {"category": "手机", "price": 999.99},
    {"category": "笔记本电脑", "price": 1299.99}
]
ids = ["prod1", "prod2", "prod3"]

# Wrap raw strings + metadata into LangChain Document objects.
document_objects = [Document(page_content=doc, metadata=meta) for doc, meta in zip(documents, metadatas)]

# Add the documents to the Chroma vector store (embeds them via `embedding`).
vectorstore.add_documents(documents=document_objects, ids=ids)

print("数据添加完成！")

# Manage documents in the vector store.
# NOTE(review): `_collection` is Chroma's private chromadb handle — used here
# for raw get/update/delete; may break across langchain/chromadb versions.
all_data = vectorstore._collection.get()
print("集合中的所有数据：")
pprint(all_data)

# Fetch a single document by id.
specific_data = vectorstore._collection.get(ids=["prod1"])
print("\nID 为 'prod1' 的文档：")
pprint(specific_data)

# Filter documents by metadata (category == "手机").
filtered_data = vectorstore._collection.get(where={"category": "手机"})
print("\n类别为 '手机' 的文档：")
pprint(filtered_data)

# Update prod1's metadata (new price); replaces the whole metadata dict.
vectorstore._collection.update(
    ids=["prod1"],
    metadatas=[{"category": "手机", "price": 749.99}]
)
print("\n已更新 ID 为 'prod1' 的文档价格。")

# Delete prod2 from the collection.
vectorstore._collection.delete(ids=["prod2"])
print("\n已删除 ID 为 'prod2' 的文档。")

# Show what remains after the update/delete.
remaining_data = vectorstore._collection.get()
print("\n剩余的文档：")
pprint(remaining_data)

# Build a retriever over the vector store and expose it as an agent tool.
retriever = vectorstore.as_retriever()

tool = create_retriever_tool(
    retriever,
    name="search_documents",
    description="根据查询搜索并检索相关文档。"
)

# Initialize the custom Tongyi language model (or use ChatOpenAI instead).
# NOTE(review): hard-coded absolute tokenizer path — machine-specific;
# consider moving it to an env var or config.
llm = CustomTongyi(
    tokenizer_path="/root/autodl-tmp/lizhenping/langchain_tutorial/LangChain_CookBook/data/openai-community-gpt2",
    model='qwen-plus',  # replace with the Tongyi model name you use
    top_p=0.9,
    temperature=0.9,
    api_key=os.getenv("DASHSCOPE_API_KEY")
)

# To use OpenAI's ChatOpenAI model instead, uncomment the block below and
# comment out the Tongyi initialization above.
"""
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(
    model_name="gpt-3.5-turbo",
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    temperature=0.7,
    max_tokens=1500
)
"""

# Create the conversational retrieval agent wired to the retriever tool.
agent_executor = create_conversational_retrieval_agent(
    llm,
    tools=[tool],
    verbose=True  # enable verbose mode to see execution logs
)

# Run a conversation turn through the agent.
# NOTE(review): the query is unrelated to the product documents stored above,
# so the retriever tool is unlikely to return useful context for it.
query = "在中国市场，根据中国汽车工业协会的数据，2022年新能源汽车的渗透率达到了多少？"
try:
    # Calling the executor directly is the legacy invocation style
    # (newer LangChain versions prefer `.invoke(...)`).
    result = agent_executor({"input": query})
    print("\nAgent 的响应：")
    pprint(result)
except Exception as e:
    # Broad catch: demo script — surfaces any agent/LLM/API failure as text.
    print(f"发生错误: {e}")
