{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from dotenv import load_dotenv\n",
    "\n",
    "# Load environment variables from the .env file.\n",
    "load_dotenv()\n",
    "\n",
    "# Verify the API keys are present WITHOUT echoing the raw secrets:\n",
    "# saved notebook outputs are often shared or committed, so printing a\n",
    "# full key would leak it. Report only whether each key was loaded.\n",
    "# (Also fixes the \"DasHSCOPE\" typo in the original output.)\n",
    "for key_name in (\"ZHIPU_API_KEY\", \"DASHSCOPE_API_KEY\"):\n",
    "    status = \"loaded\" if os.getenv(key_name) else \"MISSING\"\n",
    "    print(f\"{key_name}: {status}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Any, Dict, List, Optional\n",
    "from langchain_core.embeddings import Embeddings\n",
    "from langchain_core.utils import get_from_dict_or_env\n",
    "from pydantic import BaseModel, Field, model_validator\n",
    "import os\n",
    "\n",
    "\n",
    "class ZhipuAIEmbeddings(BaseModel, Embeddings):\n",
    "    \"\"\"ZhipuAI embedding model integration.\n",
    "\n",
    "    Setup:\n",
    "\n",
    "        To use, you should have the ``zhipuai`` python package installed, and the\n",
    "        environment variable ``ZHIPU_API_KEY`` set with your API KEY.\n",
    "\n",
    "        More instructions about ZhipuAi Embeddings, you can get it\n",
    "        from  https://open.bigmodel.cn/dev/api#vector\n",
    "\n",
    "        .. code-block:: bash\n",
    "\n",
    "            pip install -U zhipuai\n",
    "            export ZHIPU_API_KEY=\"your-api-key\"\n",
    "\n",
    "    Key init args — completion params:\n",
    "        model: Optional[str]\n",
    "            Name of ZhipuAI model to use.\n",
    "        api_key: str\n",
    "            Automatically inferred from env var `ZHIPU_API_KEY` if not provided.\n",
    "\n",
    "    See full list of supported init args and their descriptions in the params section.\n",
    "\n",
    "    Instantiate:\n",
    "\n",
    "        .. code-block:: python\n",
    "\n",
    "            from langchain_community.embeddings import ZhipuAIEmbeddings\n",
    "\n",
    "            embed = ZhipuAIEmbeddings(\n",
    "                model=\"embedding-2\",\n",
    "                # api_key=\"...\",\n",
    "            )\n",
    "\n",
    "    Embed single text:\n",
    "        .. code-block:: python\n",
    "\n",
    "            input_text = \"The meaning of life is 42\"\n",
    "            embed.embed_query(input_text)\n",
    "\n",
    "    Embed multiple texts:\n",
    "        .. code-block:: python\n",
    "\n",
    "            input_texts = [\"This is a test query1.\", \"This is a test query2.\"]\n",
    "            embed.embed_documents(input_texts)\n",
    "    \"\"\"  # noqa: E501\n",
    "\n",
    "    client: Any = Field(default=None, exclude=True)  #: :meta private:\n",
    "    model: str = Field(default=\"embedding-2\")\n",
    "    \"\"\"Model name\"\"\"\n",
    "    api_key: str\n",
    "    \"\"\"Automatically inferred from env var `ZHIPU_API_KEY` if not provided.\"\"\"\n",
    "    dimensions: Optional[int] = None\n",
    "    \"\"\"The number of dimensions the resulting output embeddings should have.\n",
    "\n",
    "    Only supported in `embedding-3` and later models.\n",
    "    \"\"\"\n",
    "\n",
    "    @model_validator(mode=\"before\")\n",
    "    @classmethod\n",
    "    def validate_environment(cls, values: Dict) -> Any:\n",
    "        \"\"\"Resolve the API key and build the ZhipuAI client.\n",
    "\n",
    "        The key is taken from the ``api_key`` init arg or, failing that,\n",
    "        the ``ZHIPU_API_KEY`` environment variable.\n",
    "\n",
    "        Raises:\n",
    "            ImportError: if the ``zhipuai`` package is not installed.\n",
    "        \"\"\"\n",
    "        # BUGFIX: the env var name was \"ZHIPUAI_API_KEY\", contradicting the\n",
    "        # class docstring and every other cell in this notebook, which all\n",
    "        # use \"ZHIPU_API_KEY\".\n",
    "        values[\"api_key\"] = get_from_dict_or_env(values, \"api_key\", \"ZHIPU_API_KEY\")\n",
    "        try:\n",
    "            from zhipuai import ZhipuAI\n",
    "\n",
    "            values[\"client\"] = ZhipuAI(api_key=values[\"api_key\"])\n",
    "        except ImportError as e:\n",
    "            raise ImportError(\n",
    "                \"Could not import zhipuai python package. \"\n",
    "                \"Please install it with `pip install zhipuai`.\"\n",
    "            ) from e\n",
    "        return values\n",
    "\n",
    "    def embed_query(self, text: str) -> List[float]:\n",
    "        \"\"\"Embed a single query text with the ZhipuAI embedding model.\n",
    "\n",
    "        Args:\n",
    "            text: A text to embed.\n",
    "\n",
    "        Returns:\n",
    "            The embedding vector for ``text``.\n",
    "        \"\"\"\n",
    "        # Delegate to the batch endpoint and unwrap the single result.\n",
    "        return self.embed_documents([text])[0]\n",
    "\n",
    "    def embed_documents(self, texts: List[str]) -> List[List[float]]:\n",
    "        \"\"\"Embed a list of text documents with the ZhipuAI embedding model.\n",
    "\n",
    "        Args:\n",
    "            texts: A list of text documents to embed.\n",
    "\n",
    "        Returns:\n",
    "            One embedding (list of floats) per input document, in order.\n",
    "        \"\"\"\n",
    "        # ``dimensions`` is only accepted by embedding-3+ models, so pass it\n",
    "        # to the API only when it was explicitly configured.\n",
    "        kwargs: Dict[str, Any] = {\"model\": self.model, \"input\": texts}\n",
    "        if self.dimensions is not None:\n",
    "            kwargs[\"dimensions\"] = self.dimensions\n",
    "        resp = self.client.embeddings.create(**kwargs)\n",
    "        return [r.embedding for r in resp.data]\n",
    "\n",
    "\n",
    "# Initialize the embeddings client.\n",
    "# NOTE: ``dimensions`` is omitted here — per the class docstring it is only\n",
    "# supported by ``embedding-3`` and later models; ``embedding-2`` always\n",
    "# returns 1024-dimensional vectors.\n",
    "embedding = ZhipuAIEmbeddings(\n",
    "    model=\"embedding-2\",\n",
    "    api_key=os.getenv(\"ZHIPU_API_KEY\"),\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import GPT2TokenizerFast\n",
    "import langchain_core.language_models.base\n",
    "\n",
    "# Local directory holding the pretrained GPT-2 tokenizer files.\n",
    "# NOTE(review): absolute machine-specific path — adjust per environment.\n",
    "TOKENIZER_PATH = \"/root/autodl-tmp/lizhenping/langchain_tutorial/LangChain_CookBook/data/openai-community-gpt2\"\n",
    "\n",
    "def custom_get_tokenizer() -> GPT2TokenizerFast:\n",
    "    \"\"\"Load a local GPT-2 tokenizer to replace LangChain's default.\n",
    "\n",
    "    Returns:\n",
    "        The ``GPT2TokenizerFast`` loaded from ``TOKENIZER_PATH``.\n",
    "\n",
    "    Raises:\n",
    "        OSError: if the tokenizer files cannot be found or read.\n",
    "    \"\"\"\n",
    "    # BUGFIX: the original wrapped this call in ``except ImportError``, which\n",
    "    # ``from_pretrained`` never raises (a bad path raises OSError; import\n",
    "    # failures already surface at the module import above).\n",
    "    tokenizer = GPT2TokenizerFast.from_pretrained(TOKENIZER_PATH)\n",
    "    print(\"自定义分词器已加载。\")\n",
    "    return tokenizer\n",
    "\n",
    "# Replace LangChain's default tokenizer factory (duplicate import removed).\n",
    "langchain_core.language_models.base.get_tokenizer = custom_get_tokenizer\n",
    "\n",
    "# Smoke-test the tokenizer.\n",
    "tokenizer = custom_get_tokenizer()\n",
    "print(f\"分词器词汇表大小: {len(tokenizer)}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.vectorstores import Chroma\n",
    "\n",
    "# Directory where Chroma persists the collection to disk.\n",
    "persist_directory = \"LangChain_CookBook/ch11_db\"\n",
    "\n",
    "# Initialize the Chroma vector store backed by the ZhipuAI embeddings\n",
    "# defined above; documents go into the \"products\" collection.\n",
    "vectorstore = Chroma(\n",
    "    embedding_function=embedding,\n",
    "    collection_name=\"products\",\n",
    "    persist_directory=persist_directory\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.schema import Document\n",
    "\n",
    "# Sample product texts, their metadata, and explicit document ids.\n",
    "documents = [\"Galaxy S21\", \"iPhone 13\", \"MacBook Pro\"]\n",
    "metadatas = [\n",
    "    {\"category\": \"手机\", \"price\": 799.99},\n",
    "    {\"category\": \"手机\", \"price\": 999.99},\n",
    "    {\"category\": \"笔记本电脑\", \"price\": 1299.99}\n",
    "]\n",
    "ids = [\"prod1\", \"prod2\", \"prod3\"]\n",
    "\n",
    "# Wrap each (text, metadata) pair in a Document object.\n",
    "document_objects = []\n",
    "for content, meta in zip(documents, metadatas):\n",
    "    document_objects.append(Document(page_content=content, metadata=meta))\n",
    "\n",
    "# Insert the documents into the vector store under the given ids.\n",
    "vectorstore.add_documents(documents=document_objects, ids=ids)\n",
    "\n",
    "# Flush the collection to disk.\n",
    "vectorstore.persist()\n",
    "\n",
    "print(\"文档已成功添加并持久化！\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pprint import pprint\n",
    "\n",
    "# Fetch every record in the collection via Chroma's underlying client.\n",
    "# NOTE(review): ``_collection`` is a private attribute of the LangChain\n",
    "# Chroma wrapper and may change between library versions.\n",
    "all_data = vectorstore._collection.get()\n",
    "print(\"集合中的所有文档：\")\n",
    "pprint(all_data)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fetch only the document whose id is 'prod1'.\n",
    "specific_data = vectorstore._collection.get(ids=[\"prod1\"])\n",
    "print(\"\\nID 为 'prod1' 的文档：\")\n",
    "pprint(specific_data)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fetch documents whose category is '手机' using a metadata filter.\n",
    "filtered_data = vectorstore._collection.get(where={\"category\": \"手机\"})\n",
    "print(\"\\n类别为 '手机' 的文档：\")\n",
    "pprint(filtered_data)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Update the metadata (new price) for 'prod1'.\n",
    "# NOTE(review): ``update`` replaces the whole metadata dict, so the\n",
    "# category must be restated alongside the new price.\n",
    "vectorstore._collection.update(\n",
    "    ids=[\"prod1\"],\n",
    "    metadatas=[{\"category\": \"手机\", \"price\": 749.99}]\n",
    ")\n",
    "print(\"\\n已更新 ID 为 'prod1' 的文档价格。\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Delete the document with id 'prod2'.\n",
    "vectorstore._collection.delete(ids=[\"prod2\"])\n",
    "print(\"\\n已删除 ID 为 'prod2' 的文档。\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fetch whatever documents remain after the delete above.\n",
    "remaining_data = vectorstore._collection.get()\n",
    "print(\"\\n剩余的文档：\")\n",
    "pprint(remaining_data)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tool function: look up documents relevant to a query.\n",
    "def search_documents(query: str) -> str:\n",
    "    \"\"\"Retrieve documents for ``query`` and format them for the agent.\n",
    "\n",
    "    Returns a human-readable string with each document's content (truncated\n",
    "    to 1000 chars) plus its category/price metadata, or a \"not found\"\n",
    "    message when nothing is retrieved.\n",
    "\n",
    "    NOTE(review): relies on the module-level ``retriever`` created in a\n",
    "    LATER cell — run that cell first or this raises NameError.\n",
    "    \"\"\"\n",
    "    retrieved_docs = retriever.get_relevant_documents(query)\n",
    "    if not retrieved_docs:\n",
    "        return \"未找到相关文档。\"\n",
    "    # Accumulate content and metadata for each hit.\n",
    "    result = \"\"\n",
    "    for doc in retrieved_docs:\n",
    "        content = doc.page_content[:1000]  # cap content length\n",
    "        metadata = doc.metadata\n",
    "        result += f\"内容: {content}\\n\"\n",
    "        result += f\"类别: {metadata.get('category', '未知')}\\n\"\n",
    "        result += f\"价格: {metadata.get('price', '未知')}\\n\\n\"\n",
    "    return result\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create a retriever from the vector store. ``search_documents`` (defined\n",
    "# in the previous cell) depends on this global — run this cell before it.\n",
    "retriever = vectorstore.as_retriever()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity-check the retriever with a test query.\n",
    "test_query = \"MacBook 的价格\"\n",
    "retrieved_docs = retriever.get_relevant_documents(test_query)\n",
    "print(f\"针对 '{test_query}' 检索到的文档数量: {len(retrieved_docs)}\")\n",
    "for i, doc in enumerate(retrieved_docs):\n",
    "    print(f\"文档 {i+1} 内容预览:\\n{doc.page_content[:1000]}\\n\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.agents import Tool\n",
    "\n",
    "# Expose ``search_documents`` to the agent as a named tool.\n",
    "search_tool = Tool(\n",
    "    name=\"search_documents\",\n",
    "    func=search_documents,\n",
    "    description=\"根据查询搜索并检索相关文档。\"\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.chat_models.tongyi import ChatTongyi\n",
    "\n",
    "# Initialize the Tongyi (DashScope) chat model; temperature=0 for\n",
    "# more deterministic tool-using behavior.\n",
    "llm = ChatTongyi(\n",
    "    model='qwen-plus',\n",
    "    temperature=0,\n",
    "    api_key=os.getenv(\"DASHSCOPE_API_KEY\")\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.agents import initialize_agent, AgentType\n",
    "\n",
    "# Build a ReAct-style agent over the search tool.\n",
    "# NOTE(review): ``initialize_agent`` is LangChain's legacy agents API;\n",
    "# newer releases recommend the LangGraph/create_react_agent path.\n",
    "agent_executor = initialize_agent(\n",
    "    tools=[search_tool],\n",
    "    llm=llm,\n",
    "    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
    "    verbose=True  # enable verbose mode to inspect the execution trace\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# The user's question for the agent.\n",
    "query = \"MacBook Pro的价格是什么？\"\n",
    "\n",
    "# Run the agent. ``invoke`` is the supported Runnable entry point;\n",
    "# calling the executor directly (``agent_executor({...})``) is deprecated.\n",
    "try:\n",
    "    result = agent_executor.invoke({\"input\": query})\n",
    "    print(\"\\nAgent 的响应：\")\n",
    "    pprint(result)\n",
    "except Exception as e:\n",
    "    print(f\"发生错误: {e}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.document_loaders import TextLoader\n",
    "from langchain.text_splitter import CharacterTextSplitter\n",
    "\n",
    "# Load an external text document.\n",
    "# NOTE(review): absolute machine-specific path — adjust per environment.\n",
    "loader = TextLoader(\"/root/autodl-tmp/lizhenping/langchain_tutorial/LangChain_CookBook/data/市场分析报告.txt\")\n",
    "document = loader.load()\n",
    "print(f\"加载的文档数量: {len(document)}\")\n",
    "\n",
    "# Split the document into manageable, slightly overlapping chunks.\n",
    "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)\n",
    "texts = text_splitter.split_documents(document)\n",
    "print(f\"拆分后的文档数量: {len(texts)}\")\n",
    "\n",
    "# Preview the first few chunks.\n",
    "for i, text in enumerate(texts[:3]):\n",
    "    print(f\"\\n文档 {i+1} 内容预览:\\n{text.page_content[:100]}\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize embeddings for the external documents.\n",
    "# NOTE: ``dimensions`` is omitted — it is only supported by embedding-3+\n",
    "# models, and embedding-2 always returns 1024-dimensional vectors.\n",
    "embeddings = ZhipuAIEmbeddings(\n",
    "    model=\"embedding-2\",\n",
    "    api_key=os.getenv(\"ZHIPU_API_KEY\"),\n",
    ")\n",
    "\n",
    "# Build a Chroma vector store from the external document chunks.\n",
    "external_db = Chroma.from_documents(texts, embeddings)\n",
    "external_db.persist()\n",
    "# BUGFIX: the LangChain Chroma wrapper does not implement len(), so\n",
    "# ``len(external_db)`` raises TypeError — count via the collection instead.\n",
    "print(f\"外部向量存储中的文档数量: {external_db._collection.count()}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create a retriever over the external document store.\n",
    "external_retriever = external_db.as_retriever()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity-check retrieval against the external documents.\n",
    "external_query = \"全球新能源汽车销量\"\n",
    "external_retrieved_docs = external_retriever.get_relevant_documents(external_query)\n",
    "print(f\"针对 '{external_query}' 检索到的文档数量: {len(external_retrieved_docs)}\")\n",
    "for i, doc in enumerate(external_retrieved_docs):\n",
    "    print(f\"文档 {i+1} 内容预览:\\n{doc.page_content[:1000]}\\n\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import traceback\n",
    "from langchain.agents import Tool\n",
    "\n",
    "# Combined search over both vector stores.\n",
    "def combined_search_documents(query: str) -> str:\n",
    "    \"\"\"Search both the predefined product store and the external document store.\n",
    "\n",
    "    Returns a formatted string with hits from each source (content capped\n",
    "    at 1000 chars; category/price metadata shown for predefined documents),\n",
    "    or a \"not found\" message when neither retriever returns anything.\n",
    "\n",
    "    NOTE(review): relies on the module-level ``retriever`` and\n",
    "    ``external_retriever`` created in earlier cells.\n",
    "    \"\"\"\n",
    "    retrieved_docs_predefined = retriever.get_relevant_documents(query)\n",
    "    retrieved_docs_external = external_retriever.get_relevant_documents(query)\n",
    "    \n",
    "    if not retrieved_docs_predefined and not retrieved_docs_external:\n",
    "        return \"未找到相关文档。\"\n",
    "    \n",
    "    result = \"\"\n",
    "    # Format hits from the predefined product store.\n",
    "    if retrieved_docs_predefined:\n",
    "        result += \"预定义文档:\\n\"\n",
    "        for doc in retrieved_docs_predefined:\n",
    "            content = doc.page_content[:1000]\n",
    "            metadata = doc.metadata\n",
    "            result += f\"内容: {content}\\n类别: {metadata.get('category', '未知')}\\n价格: {metadata.get('price', '未知')}\\n\\n\"\n",
    "    \n",
    "    # Format hits from the external document store.\n",
    "    if retrieved_docs_external:\n",
    "        result += \"外部文档:\\n\"\n",
    "        for doc in retrieved_docs_external:\n",
    "            content = doc.page_content[:1000]\n",
    "            result += f\"内容: {content}\\n\\n\"\n",
    "    \n",
    "    return result\n",
    "\n",
    "# Tool wrapper so the agent can call the combined search.\n",
    "combined_search_tool = Tool(\n",
    "    name=\"combined_search_documents\",\n",
    "    func=combined_search_documents,\n",
    "    description=\"根据查询搜索并检索相关预定义文档和外部文档。\"\n",
    ")\n",
    "\n",
    "# New agent executor that uses the combined retrieval tool.\n",
    "combined_agent_executor = initialize_agent(\n",
    "    tools=[combined_search_tool],\n",
    "    llm=llm,\n",
    "    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n",
    "    verbose=True\n",
    ")\n",
    "\n",
    "# The user's question for the combined agent.\n",
    "combined_query = \"请问2022年全球新能源汽车销量达到多少？\"\n",
    "\n",
    "# Run the combined agent.\n",
    "# NOTE(review): direct ``executor({...})`` calls are deprecated in newer\n",
    "# LangChain releases in favor of ``.invoke({...})``.\n",
    "try:\n",
    "    combined_result = combined_agent_executor({\"input\": combined_query})\n",
    "    print(\"\\n综合代理的响应：\")\n",
    "    pprint(combined_result)\n",
    "except Exception as e:\n",
    "    print(f\"发生错误: {e}\")\n",
    "    traceback.print_exc()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "chatchat",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
