{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "教你如何解决RAG中类似PDF中表格等内容精准向量化的难题, https://zhuanlan.zhihu.com/p/671184770"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pydantic import BaseModel\n",
    "from typing import Any, Optional\n",
    "from unstructured.partition.pdf import partition_pdf\n",
    "from dotenv import load_dotenv; load_dotenv()\n",
    "import os"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "非结构化预处理工具经过微调，可以明确地解决这些问题。使用 partition_pdf，我们将原始 PDF 文档转换为 JSON，使表格和文本具有机器可读性。它还将丰富的元数据归因于文本和表格，这在后处理和下游 NLP 任务中特别有用"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "File: os.stat_result(st_mode=33206, st_ino=562949954642224, st_dev=2144071787, st_nlink=1, st_uid=0, st_gid=0, st_size=591216, st_atime=1710576043, st_mtime=1710576043, st_ctime=1710576043)\n",
      "NLTK_DATA: d:/models/nltk_data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "This function will be deprecated in a future release and `unstructured` will simply use the DEFAULT_MODEL from `unstructured_inference.model.base` to set default model name\n",
      "Some weights of the model checkpoint at microsoft/table-transformer-structure-recognition were not used when initializing TableTransformerForObjectDetection: ['model.backbone.conv_encoder.model.layer2.0.downsample.1.num_batches_tracked', 'model.backbone.conv_encoder.model.layer3.0.downsample.1.num_batches_tracked', 'model.backbone.conv_encoder.model.layer4.0.downsample.1.num_batches_tracked']\n",
      "- This IS expected if you are initializing TableTransformerForObjectDetection from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing TableTransformerForObjectDetection from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
    "# 显示文件状态\n",
    "path = \"./\"\n",
    "file = \"./佳讯飞鸿：2022年年度报告/佳讯飞鸿：2022年年度报告_107-113.pdf\"\n",
    "print(f\"File: {os.stat(file)}\")\n",
    "\n",
    "print(f\"NLTK_DATA: {os.environ['NLTK_DATA']}\")\n",
    "\n",
    "# Get elements\n",
    "raw_pdf_elements = partition_pdf(\n",
    "    filename=file,\n",
    "    # Unstructured first finds embedded image blocks\n",
    "\textract_images_in_pdf=False,\n",
    "\t# Use layout model (YOLOX) to get bounding boxes (for tables) and find titles\n",
    "\t# Titles are any sub-section of the document\n",
    "\tinfer_table_structure=True,\n",
    "\t# Post processing to aggregate text once we have the title\n",
    "\tchunking_strategy=\"by_title\",\n",
    "\t# Chunking params to aggregate text blocks\n",
    "\t# Attempt to create a new chunk 3800 chars\n",
    "\t# Attempt to keep chunks > 2000 chars\n",
    "\tmax_characters=4000,\n",
    "\tnew_after_n_chars=3800,\n",
    "\tcombine_text_under_n_chars=2000\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "探查一下 unstructured 解析出来的内容，把 text 和 table 内容分开放，以备后面继续处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "category types: {\"<class 'unstructured.documents.elements.CompositeElement'>\": 10, \"<class 'unstructured.documents.elements.Table'>\": 10}\n",
      "table_elements: 10\n",
      "text_elements: 10\n"
     ]
    }
   ],
   "source": [
    "class Element(BaseModel):\n",
    "    type: str\n",
    "    text: Any\n",
    "\n",
    "# Categorize by type\n",
    "category_counts = {}\n",
    "categorized_elements = []\n",
    "for element in raw_pdf_elements:\n",
    "    category = str(type(element))\n",
    "    if category in category_counts:\n",
    "        category_counts[category] += 1\n",
    "    else:\n",
    "        category_counts[category] = 1\n",
    "    if \"unstructured.documents.elements.Table\" in str(type(element)):\n",
    "        categorized_elements.append(Element(type=\"table\", text=str(element)))\n",
    "    elif \"unstructured.documents.elements.CompositeElement\" in str(type(element)):\n",
    "        categorized_elements.append(Element(type=\"text\", text=str(element)))\n",
    "\n",
    "print(f\"category types: {category_counts}\")\n",
    "# Tables\n",
    "table_elements = [e for e in categorized_elements if e.type == \"table\"]\n",
    "print(f\"table_elements: {len(table_elements)}\")\n",
    "# output: 28 elements in the PDF file\n",
    "\n",
    "# Text\n",
    "text_elements = [e for e in categorized_elements if e.type == \"text\"]\n",
    "print(f\"text_elements: {len(text_elements)}\") \n",
    "# output: 127 elements in the PDF file"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "导入 LangChain 基础包，引入了 langchain-hub 里面一个社区贡献的 prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "obj: input_variables=['element'] metadata={'lc_hub_owner': 'rlm', 'lc_hub_repo': 'multi-vector-retriever-summarization', 'lc_hub_commit_hash': 'd822e5e6d60be1e8e19b1e849a99ab65d384972cfd2414e4281b386e287b122d'} template='You are an assistant tasked with summarizing tables and text. \\\\ \\nGive a concise summary of the table or text. Table or text chunk: {element}'\n"
     ]
    }
   ],
   "source": [
     "from langchain.chat_models import ChatOpenAI\n",
     "from langchain.prompts import ChatPromptTemplate\n",
     "from langchain.schema.output_parser import StrOutputParser\n",
     "from langchain import hub\n",
     "# Pull a community-contributed summarization prompt from LangChain Hub\n",
     "obj = hub.pull(\"rlm/multi-vector-retriever-summarization\")\n",
     "print(f\"obj: {obj}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "测试智谱的 LangChain 模型。智谱 AI 已发布新版 API SDK v4，运行时发现缺少 model_api；由此推断 LangChain-Community 中的 ChatZhipuAI 适配的是 v3 版本的 SDK，因此这里改用自行封装的适配类 ChatZhipuAI_zhch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "content='你好！很抱歉，作为一个AI，我没有实时的天气更新功能。不过，你可以通过查看当地的天气预报或者使用相关的手机应用来获取今天的天气情况。希望你有一个美好的一天！如果有其他问题或需要帮助，请随时告诉我。' response_metadata={'finish_reason': 'stop', 'index': 0}\n"
     ]
    }
   ],
   "source": [
     "# The community ChatZhipuAI targets the v3 SDK; use a local adapter for the v4 SDK\n",
     "# from langchain_community.chat_models import ChatZhipuAI\n",
     "from utils.zhipuai_zhch import ChatZhipuAI_zhch\n",
     "\n",
     "# Smoke-test the Zhipu GLM-4 chat model (API key is read from the environment)\n",
     "model = ChatZhipuAI_zhch(\n",
     "    temperature=0.1,\n",
     "    api_key=os.environ[\"ZHIPU_API_KEY\"],\n",
     "    model=\"glm-4\"\n",
     "    )\n",
     "\n",
     "print(model.invoke(\"你好，今天天气如何?\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "ChatZhipuAI，使用智谱模型，用 LCEL 组装一个 chain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
     "# from langchain_community.chat_models import ChatZhipuAI\n",
     "from utils.zhipuai_zhch import ChatZhipuAI_zhch\n",
     "from langchain_core.messages import AIMessage, HumanMessage, SystemMessage\n",
     "\n",
     "# Summary chain\n",
     "# prompt = ChatPromptTemplate.from_template(obj.template)\n",
     "# model = ChatOpenAI(temperature=0,model=\"gpt-4\") \n",
     "# Chinese adaptation of the hub prompt: summarize a table/text chunk as a finance expert\n",
     "template='你是一个财务专家，你的任务是从上下文中提取财务信息，然后回答问题. \\\\ \\n给出表或文本的简明摘要. 表或文本块: {element}'\n",
     "prompt = ChatPromptTemplate.from_template(template)\n",
     "model = ChatZhipuAI_zhch(\n",
     "    temperature=0.1,\n",
     "    api_key=os.environ[\"ZHIPU_API_KEY\"],\n",
     "    model=\"glm-4\",\n",
     ")\n",
     "# LCEL: pass the chunk through as {element}, render the prompt, call the model, parse to str\n",
     "summarize_chain = {\"element\": lambda x:x} | prompt | model | StrOutputParser()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "通过summarize chain 分别提取 text 和 table 信息"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "content='根据提供的财务数据，以下是摘要：\\n\\n**期初与期末余额：**\\n- 上年期末股本余额：593,718,564.00元\\n- 本年期初股本余额：同样为593,718,564.00元\\n\\n**本期增减变动金额：**\\n- 综合收益总额未单独列出。\\n- 所有者投入和减少资本：\\n  - 所有者投入的普通股：无数据列出。\\n  - 其他权益工具持有者投入资本：6,691,136.98元。\\n  - 股份支付计入所有者权益的金额：无数据列出。\\n  - 其他：包括优先股、永续债等，具体变动为-39,683,075.69元。\\n\\n**具体项目变动：**\\n- 资本公积：增加了751,246,676.86元。\\n- 库存股：减少了26,873,547.89元。\\n- 其他综合收益：减少了3,105,220.32元。\\n- 专项储备：增加了3,390,969.21元。\\n- 盈余公积：增加了82,145,274.15元。\\n- 一般风险准备：减少了12,602,729.17元。\\n- 未分配利润：增加了20,340,059.67元。\\n\\n**所有者权益合计：**\\n- 期初所有者权益合计：2,176,717,804.06元。\\n- 本期增加后所有者权益合计：2,181,115,622.51元。\\n\\n**少数股东权益：**\\n- 期初少数股东权益：4,397,818.45元。\\n- 本期变动后少数股东权益：55,563,418.71元。\\n\\n**其他：**\\n- 其他项下有其他权益工具、永续债等，具体变动未详细列出。\\n\\n请注意，上述摘要基于提供的数据，但某些项目如“综合收益总额”和“所有者投入的普通股”的具体数值未在文本中明确列出。这些数据可能需要结合完整的财务报表进行解读。' response_metadata={'finish_reason': 'stop', 'index': 0}\n",
      "根据提供的财务数据，以下是摘要：\n",
      "\n",
      "**期初与期末余额：**\n",
      "- 上年期末股本余额：593,718,564.00元\n",
      "- 本年期初股本余额：同样为593,718,564.00元\n",
      "\n",
      "**本期增减变动金额：**\n",
      "- 综合收益总额未单独列出。\n",
      "- 所有者投入和减少资本：\n",
      "  - 所有者投入的普通股：无数据列出。\n",
      "  - 其他权益工具持有者投入资本：6,691,136.98元。\n",
      "  - 股份支付计入所有者权益的金额：无数据列出。\n",
      "  - 其他：包括优先股、永续债等，具体变动为-39,683,075.69元。\n",
      "\n",
      "**具体项目变动：**\n",
      "- 资本公积：增加了751,246,676.86元。\n",
      "- 库存股：减少了26,873,547.89元。\n",
      "- 其他综合收益：减少了3,105,220.32元。\n",
      "- 专项储备：增加了3,390,969.21元。\n",
      "- 盈余公积：增加了82,145,274.15元。\n",
      "- 一般风险准备：减少了12,602,729.17元。\n",
      "- 未分配利润：增加了20,340,059.67元。\n",
      "\n",
      "**所有者权益合计：**\n",
      "- 期初所有者权益合计：2,176,717,804.06元。\n",
      "- 本期增加后所有者权益合计：2,181,115,622.51元。\n",
      "\n",
      "**少数股东权益：**\n",
      "- 期初少数股东权益：4,397,818.45元。\n",
      "- 本期变动后少数股东权益：55,563,418.71元。\n",
      "\n",
      "**其他：**\n",
      "- 其他项下有其他权益工具、永续债等，具体变动未详细列出。\n",
      "\n",
      "请注意，上述摘要基于提供的数据，但某些项目如“综合收益总额”和“所有者投入的普通股”的具体数值未在文本中明确列出。这些数据可能需要结合完整的财务报表进行解读。\n"
     ]
    }
   ],
   "source": [
    "# Apply to texts\n",
    "texts = [i.text for i in text_elements]\n",
    "text_summaries = summarize_chain.batch(texts, {\"max_concurrency\": 5})\n",
    "\n",
    "# Apply to tables\n",
    "tables = [i.text for i in table_elements]\n",
    "table_summaries = summarize_chain.batch(tables, {\"max_concurrency\": 5})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "把 text 和 table 信息分别添加到 LangChain 的 multi-vector-retriever 里面"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import uuid\n",
    "from langchain.vectorstores import Chroma\n",
    "from langchain.storage import InMemoryStore\n",
    "from langchain.schema.document import Document\n",
    "# from langchain.embeddings import OpenAIEmbeddings\n",
    "from langchain.retrievers.multi_vector import MultiVectorRetriever\n",
    "from langchain_community.embeddings.sentence_transformer import (\n",
    "    SentenceTransformerEmbeddings,\n",
    ")\n",
    "\n",
    "# create the open-source embedding function\n",
    "embedding_function = SentenceTransformerEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n",
    "\n",
    "# The vectorstore to use to index the child chunks\n",
    "vectorstore = Chroma(\n",
    "    collection_name=\"summaries\",\n",
    "    embedding_function=embedding_function\n",
    ")\n",
    "\n",
    "# The storage layer for the parent documents\n",
    "store = InMemoryStore()\n",
    "id_key = \"doc_id\"\n",
    "\n",
    "# The retriever (empty to start)\n",
    "retriever = MultiVectorRetriever(\n",
    "    vectorstore=vectorstore,\n",
    "    docstore=store,\n",
    "    id_key=id_key,\n",
    ")\n",
    "\n",
    "# Add texts\n",
    "doc_ids = [str(uuid.uuid4()) for _ in texts]\n",
    "summary_texts = [Document(page_content=s,metadata={id_key: doc_ids[i]}) for i, s in enumerate(text_summaries)]\n",
    "retriever.vectorstore.add_documents(summary_texts)\n",
    "retriever.docstore.mset(list(zip(doc_ids, texts)))\n",
    "\n",
    "# Add tables\n",
    "table_ids = [str(uuid.uuid4()) for _ in tables]\n",
    "summary_tables = [Document(page_content=s,metadata={id_key: table_ids[i]}) for i, s in enumerate(table_summaries)]\n",
    "retriever.vectorstore.add_documents(summary_tables)\n",
    "retriever.docstore.mset(list(zip(table_ids, tables)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "组装 RAG"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from operator import itemgetter\n",
    "from langchain.schema.runnable import RunnablePassthrough\n",
    "\n",
    "# Prompt template\n",
    "template = \"\"\"Answer the question based only on the following context, which can include text and tables:\n",
    "{context}\n",
    "Question: {question}\n",
    "\"\"\"\n",
    "prompt = ChatPromptTemplate.from_template(template)\n",
    "\n",
    "# LLM\n",
    "# model = ChatOpenAI(temperature=0)\n",
    "model = ChatZhipuAI_zhch(\n",
    "    temperature=0.1,\n",
    "    api_key=os.environ[\"ZHIPU_API_KEY\"],\n",
    "    model=\"glm-4\"\n",
    "    )\n",
    "\n",
    "# RAG pipeline\n",
    "chain = (\n",
    "    {\"context\": retriever, \"question\": RunnablePassthrough()}\n",
    "    | prompt\n",
    "    | model\n",
    "    | StrOutputParser()\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Query the RAG chain: compare parent-company finance costs for 2021 vs 2022, JSON output\n",
     "chain.invoke('比较2021，2022年母公司财务费用，输出采用Json格式，{\"年\": {\"key1\": \"value1\"},}')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
