{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 使用camelot解析pdf文件\n",
    "```\n",
    "pip install -U camelot-py\n",
    "pip install -U \"PyPDF2<3.0\"\n",
    "```\n",
    "Lattice 格子本质上更具确定性，并且不依赖于猜测。它可用于解析单元格之间有分界线的表格，并且可以自动解析页面上存在的多个表格。\n",
    "采用Lattice需要安装ghostscript，https://camelot-py.readthedocs.io/en/master/user/install-deps.html\n",
    "安装后，设置环境变量：PATH，\n",
    "C:\\Program Files\\gs\\gs10.03.0\\bin\n",
    "C:\\Program Files\\gs\\gs10.03.0\\lib\n",
    "\n",
    "或者直接设置环境变量：\n",
    "export PATH=\"C:\\Program Files\\gs\\gs10.03.0\\bin;C:\\Program Files\\gs\\gs10.03.0\\lib:$PATH\"\n",
    "\n",
    "或者backend设置为其他的图像解析器\n",
    "tables = camelot.read_pdf(file, flavor='lattice', pages='all', backend=\"poppler\")\n",
    "\n",
    "##### 参考：\n",
    "1. [python camelot参数详解--提取PDF指定区域中的表格](https://blog.csdn.net/luckilycc/article/details/132322812)\n",
    "2. [Python 超强大的PDF表格提取器 — Camelot](https://zhuanlan.zhihu.com/p/366190934)\n",
    "3. [LangChain:万能的非结构化文档载入详解（一）](https://zhuanlan.zhihu.com/p/624812261)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "根据从某行往下单元格内容是否全是数字或为空，判断表头"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "import pandas as pd\n",
    "\n",
    "# 将三位一撇金额转换为浮点数，空为np.nan\n",
    "def amount_to_float(s: str):\n",
    "    if type(s) == str and s.strip() == '':\n",
    "        # return pd.NA\n",
    "        return ''\n",
    "    else:\n",
    "        if(is_amount(s)):\n",
    "            return float(s.replace(',', '').replace('$', '').replace('€', '').replace('£', '').replace('¥', ''))\n",
    "        else:\n",
    "            return s\n",
    "\n",
    "def is_amount(s: str):\n",
    "    # 正则表达式模式\n",
    "    # pattern = r'^\\$?(\\d{1,3}(,\\d{3})*(\\.\\d{2})?|\\d{1,3}(\\.\\d{2})?)$'\n",
    "    pattern = r'^[\\$\\€\\£\\¥]?[-+]?\\d{1,3}(,\\d{3})*(\\.\\d{2})?$'\n",
    "    # 使用正则表达式匹配字符串\n",
    "    match = re.match(pattern, s)\n",
    "    \n",
    "    # 如果匹配成功，返回 True，否则返回 False\n",
    "    return bool(match)\n",
    "\n",
    "def is_text(s: str):\n",
    "    # 正则表达式模式，判断是否为纯文本\n",
    "    pattern = r'^\\d+$'\n",
    "    match = re.match(pattern, s)\n",
    "    # \"123abc\" 应该返回 True\n",
    "    return not bool(match)\n",
    "\n",
    "# 简单的方法来猜测表头行数\n",
    "def guess_header_rows(df):\n",
    "\n",
    "    # for i in range(df.shape[0]).reverse():\n",
    "        # 检查某一行以下是否数据类型变得更加一致，这里只是一个非常简单的示例\n",
    "        # 第一列是列头，不判断数字，所以从第二列开始判断，单元格是数字或为空\n",
    "        # if df.iloc[i:, 1:].apply(lambda col: col.apply(lambda x: is_amount(x) or x.strip() == '')).all().all():\n",
    "        #     return i+1\n",
    "    # 代表报表只有表头，没有数据\n",
    "    # return df.shape[0]\n",
    "\n",
    "    # 倒序遍历\n",
    "    i = df.shape[0]\n",
    "    while i > 0:\n",
    "        # 检查某一行以上是否全是文本, [0,1)\n",
    "        if df.iloc[:i, 1:].apply(lambda col: col.apply(lambda x: not is_amount(x) and x.strip() != '')).all().all():\n",
    "            return i\n",
    "        i -= 1\n",
    "    # 代表全是数据\n",
    "    return i"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将多级表头转换为单级表头\n",
    "def flatten_multi_header(df: pd.DataFrame, header_rows: int) -> list[str]:\n",
    "    header_list = []\n",
    "    # 获取多级表头，放入header_list中\n",
    "    for row in range(header_rows):\n",
    "        row_header = []\n",
    "        for col in range(df.shape[1]):\n",
    "            # 获取表头, 将空格和换行符去掉\n",
    "            row_header.append(df.iloc[row, col].strip().replace('\\n', ''))\n",
    "        header_list.append(row_header)\n",
    "    \n",
    "    # 如果表头行数大于1，将表头合并\n",
    "    if len(header_list) > 1:\n",
    "        \"\"\"\n",
    "        如果逐行每行扫描，如果后一个单元格为空，将前一个单元格合并到后一个单元格\n",
    "        例如：\n",
    "        ['项目', '2022 年度', '', '', '', '', '', '', '', '', '', '', '', '', '', '']\n",
    "        ['项目', '2022 年度', '2022 年度', '2022 年度', '2022 年度', '2022 年度', '2022 年度', '2022 年度', '2022 年度', '2022 年度', '2022 年度', '2022 年度', '2022 年度', '2022 年度', '2022 年度', '2022 年度']\n",
    "\n",
    "        ['', '归属于母公司所有者权益', '', '', '', '', '', '', '', '', '', '', '', '', '少数股东权益', '所有者权益合计']\n",
    "        变换后为：['', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '少数股东权益', '所有者权益合计']\n",
    "        \"\"\"\n",
    "        for i in range(header_rows - 1):\n",
    "            j = 0\n",
    "            while j < len(header_list[i + 1]):\n",
    "                # 判断本行是横向分组还是纵向分组\n",
    "                # 如果header_list[i][j]有值，遍历该行后面的单元格直到遇到不为空的单元格，或者遍历到末尾\n",
    "                row_group = False\n",
    "                col_group = False\n",
    "                # 标记有值的单元格位置\n",
    "                pos = 0\n",
    "                if header_list[i][j] != '':\n",
    "                    # 如果从头往后遍历，[i,j]有值\n",
    "                    for k in range(j + 1, len(header_list[i])):\n",
    "                        if header_list[i][k] != '':\n",
    "                            pos = k\n",
    "                            break\n",
    "                        elif k == len(header_list[i]) -1:\n",
    "                            # 一直到行尾都为''\n",
    "                            pos = len(header_list[i])\n",
    "                    # 判断下一行相同列范围j列到pos-1列，如果有值，就是横向分组\n",
    "                    if pos > j:\n",
    "                        for k1 in range(j, pos):\n",
    "                            if header_list[i+1][k1] != '':\n",
    "                                row_group = True\n",
    "                                break\n",
    "                        if row_group == True:\n",
    "                            for k1 in range(j+1, pos):\n",
    "                                header_list[i][k1] = header_list[i][j]\n",
    "                            j = pos \n",
    "                            continue\n",
    "                else:\n",
    "                    # 为空就是纵向分组\n",
    "                    if i > 0:\n",
    "                        header_list[i][j] = header_list[i-1][j]\n",
    "                \n",
    "                j = j + 1\n",
    "    \n",
    "        \"\"\"\n",
    "        纵向合并，如果后一个单元格为空，将前一个单元格复制到后一个单元格，\n",
    "        如果后一个单元格不为空，将前一个单元格合并到后一个单元格，\n",
    "        例如：2022年度-归属于母公司所有者权益\n",
    "        head2: ['项目', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '归属于母公司所有者权益', '少数股东权益', '所有者权益合计']\n",
    "        head3: ['项目', '股本', '其他权益工具', '其他权益工具', '其他权益工具', '资本公积', '减：库存股', '其他综合收益', '专项储备', '盈余公积', '一般风险准备', '未分配利润', '其 他', '小计', '少数股东权益', '所有者权益合计']\n",
    "        \"\"\"\n",
    "        for col in range(len(header_list[0])):\n",
    "            for i in range(header_rows):\n",
    "                if header_list[i][col] == '':\n",
    "                    if i > 0:\n",
    "                        header_list[i][col] = header_list[i - 1][col]\n",
    "                else:\n",
    "                    if i > 0:\n",
    "                        if header_list[i][col] != header_list[i - 1][col].split('-')[-1]:\n",
    "                        # 如果与上一行同列单元格值不同，合并\n",
    "                            header_list[i][col] = header_list[i - 1][col] + \"-\" + header_list[i][col]\n",
    "                        else:\n",
    "                            header_list[i][col] = header_list[i - 1][col] \n",
    "\n",
    "    return header_list[-1]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 处理跨页报表，\n",
    "1. 判断表头，如果一行往后全是数字或空，代表这一行以上是表头\n",
    "2. 跨页报表，后一页的开始一定没有表头"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import os\n",
    "from pydantic import BaseModel\n",
    "from typing import TypeVar\n",
    "\n",
    "PandasDataFrame = TypeVar('pandas.core.frame.DataFrame')\n",
    "\n",
    "class SheetInfo(BaseModel):\n",
    "    table_idx: int\n",
    "    page_no: int\n",
    "    sheet_data: PandasDataFrame\n",
    "\n",
    "# 标准化表格，将多级表头转换为单级表头，将表格中的数字转化为浮点数\n",
    "def extract_table(table_idx: int, page_no: int, df: pd.DataFrame, report_list: list[SheetInfo])->pd.DataFrame:\n",
    "    \"\"\"\n",
    "    :param df: 一个表格\n",
    "    :param report_list: 一个列表，用于存储处理后的表格\n",
    "    :return: 返回处理后的表格名称\n",
    "    \"\"\"\n",
    "    temp_header = df.iloc[0, 0].split('\\n')\n",
    "    # 如果temp_header长度等于表格的列数，且第一行其他列为空\n",
    "    if len(temp_header) == df.shape[1] and df.iloc[0, 1:].apply(lambda x: x.strip() == '').all():\n",
    "        # 需要将表头拆分到其他列\n",
    "        for i in range(0, len(temp_header)):\n",
    "            df.iloc[0, i] = temp_header[i]\n",
    "\n",
    "    if not hasattr(extract_table, 'only_header'):\n",
    "        extract_table.only_header = False  # 首次调用时初始化静态变量\n",
    "        extract_table.new_header = []\n",
    "\n",
    "    if extract_table.only_header:\n",
    "        # 将之前的表格与当前的表格header合并\n",
    "        sheet_info = report_list.pop()\n",
    "        df_pre = sheet_info.sheet_data\n",
    "        df = pd.concat([df_pre, df], axis=0, ignore_index=True)\n",
    "        table_idx = sheet_info.table_idx\n",
    "        page_no = sheet_info.page_no\n",
    "\n",
    "    header_rows = guess_header_rows(df)\n",
    "    if header_rows == df.shape[0]:  # 表格中只有表头\n",
    "        # 只可能表头跨一页\n",
    "        if extract_table.new_header == False:  # FIXME: new_header is initialized to [] and never reassigned, so '[] == False' is always False and this branch never runs; likely intended 'if not extract_table.new_header:'\n",
    "            extract_table.only_header = True\n",
    "            report_list.append(SheetInfo(table_idx=table_idx, page_no=page_no, sheet_data=df))\n",
    "            return df\n",
    "        else:\n",
    "            extract_table.only_header = False\n",
    "    else:\n",
    "        extract_table.only_header = False\n",
    "    \n",
    "    # 将表格中的数据转化为浮点数\n",
    "    # 若要转换整个DataFrame且其中可能包含非数字字符串，则需逐列处理或筛选出需要转换的列\n",
    "    # numeric_columns = df.iloc[header_rows:, 1:].select_dtypes(include=[object]).columns.tolist()  # 获取需要转换的列名列表\n",
    "    # df.loc[df.index[header_rows:], numeric_columns] = df.loc[df.index[header_rows:], numeric_columns].apply(lambda col: col.apply(lambda x: amount_to_float(x)))\n",
    "    df.iloc[header_rows:, 1:] = df.iloc[header_rows:, 1:].apply(lambda col: col.apply(lambda x: amount_to_float(x)))\n",
    "    df.iloc[:,0] = df.iloc[:, 0].str.replace('\\n', '').str.strip()\n",
    "\n",
    "    # 如果表格有表头，处理表格\n",
    "    if header_rows > 0:\n",
    "        if header_rows == df.shape[0]:  # 非财务表格，没有数据区\n",
    "            new_header = df.iloc[0]\n",
    "            header_rows = 1\n",
    "        else:\n",
    "            new_header = flatten_multi_header(df, header_rows)\n",
    "        \n",
    "        # 设置新的列名称\n",
    "        df.columns = new_header\n",
    "        \n",
    "        # 删除原始表头行\n",
    "        df = df.iloc[header_rows:]\n",
    "        \n",
    "        # 重置索引\n",
    "        df.reset_index(drop=True, inplace=True)\n",
    "        print(f\"Processed Table {table_idx}: {df.shape}\")\n",
    "        # print(df.head())  # 打印前几行作为示例    \n",
    "        report_list.append(SheetInfo(table_idx=table_idx, page_no=page_no, sheet_data=df))\n",
    "        return df\n",
    "    else:\n",
    "        # 如果表格没有表头，代表是跨页的表格，添加到上一个表格的末尾\n",
    "        # 设置新的列名称\n",
    "        sheet_info = report_list.pop()\n",
    "        df_pre = sheet_info.sheet_data\n",
    "        table_idx = sheet_info.table_idx\n",
    "        page_no = sheet_info.page_no\n",
    "        df.columns = df_pre.columns\n",
    "        # 删除原始表头行\n",
    "        df = df.iloc[header_rows:]\n",
    "        # 重置索引\n",
    "        df.reset_index(drop=True, inplace=True)\n",
    "        print(f\"Table {table_idx}: 合并到上一个表格df：{df.shape}\")\n",
    "        df_pre = pd.concat([df_pre, df], axis=0, ignore_index=True)\n",
    "        report_list.append(SheetInfo(table_idx=table_idx, page_no=page_no, sheet_data=df_pre))\n",
    "        print(f\"Table {table_idx}: 合并后df：{df_pre.shape}\")\n",
    "        return df_pre"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "\n",
    "import warnings\n",
    "\n",
    "from camelot.handlers import PDFHandler, Lattice, Stream, TableList\n",
    "from camelot.utils import validate_input, remove_extra, TemporaryDirectory\n",
    "from pdfminer.layout import LTTextContainer, LTChar\n",
    "from collections import Counter\n",
    "import math\n",
    "import os\n",
    "import re\n",
    "\n",
    "class PDFHandler_zhch(PDFHandler):\n",
    "    \"\"\"\n",
    "    提取表格标题\n",
    "    \"\"\"\n",
    "\n",
    "    def recognize_title(self, text):\n",
    "        \"\"\"\n",
    "        判断是否是以中文数字或阿拉伯数字开头跟\".\"或顿号的文本\n",
    "        \"\"\"\n",
    "        pattern = r'^(?:[\\u4e00-\\u9fa5]{1,4}(?:[、.])|[\\d]+[、.])'\n",
    "        matches = re.match(pattern, text)\n",
    "        return matches\n",
    "\n",
    "    def get_ltchars_font_size(self, ltchars):\n",
    "        font_sizes = Counter()\n",
    "        for character in ltchars:\n",
    "            # 如果是字符，则获取其字体大小\n",
    "            if isinstance(character, LTChar):\n",
    "                font_size = round(character.size, 3)\n",
    "                font_sizes[font_size] += 1\n",
    "        # 找到出现次数最多的字体大小\n",
    "        most_common_font_size = font_sizes.most_common(1)[0][0]\n",
    "        return most_common_font_size\n",
    "    \n",
    "    def extract_table_titles(self, parser, tables, body_font_size):\n",
    "        \"\"\"Extracts table title from a single page PDF.\n",
    "        离表格开头行最近的文字末尾有“表”的文字\n",
    "        y坐标是从下往上增加的\n",
    "\n",
    "        Parameters\n",
    "        parser : Lattice\n",
    "        tables : [Table] 一页中可能有多张报表\n",
    "        Returns\n",
    "        -------\n",
    "        titles : [str]\n",
    "        \"\"\"\n",
    "        \n",
    "        # 计算每个报表标题Y坐标范围\n",
    "        title_y_scopes = []\n",
    "        for i, t in enumerate(tables):\n",
    "            if i == 0:\n",
    "                y_scope = (t._bbox[3], parser.layout.y1)\n",
    "            else:\n",
    "                y_scope = (t._bbox[3], tables[i-1]._bbox[1])\n",
    "            title_y_scopes.append(y_scope)\n",
    "        \n",
    "        # 将报表标题置空\n",
    "        titles = []\n",
    "        title = ()\n",
    "        for i, y_scope in enumerate(title_y_scopes):\n",
    "            titles.append((\"\", 9999))\n",
    "        # 推测标题\n",
    "        for horizontal_text in parser.horizontal_text:\n",
    "            for i, y_scope in enumerate(title_y_scopes):\n",
    "                if horizontal_text.y1 > y_scope[0] and horizontal_text.y1 < y_scope[1]:\n",
    "                    text = horizontal_text.get_text().strip()\n",
    "                    # text_font_size = self.get_ltchars_font_size(horizontal_text._objs)\n",
    "                    # 如果有文字，且最后一个字符是“表”\n",
    "                    if text and (text[-1] == \"表\" or self.recognize_title(text)):\n",
    "                        title = (text, horizontal_text.y1)\n",
    "                        if title[1] < titles[i][1] :\n",
    "                            titles[i] = title\n",
    "        return titles\n",
    "        \n",
    "    \"\"\"\n",
    "    提取表格的同时提取中文报表名称，规则为最靠近表格顶部末尾带“表”的文本\n",
    "    \"\"\"\n",
    "    def parse(\n",
    "        self, flavor=\"lattice\", suppress_stdout=False, layout_kwargs={}, **kwargs\n",
    "    ):\n",
    "        \"\"\"Extracts tables by calling parser.get_tables on all single\n",
    "        page PDFs.\n",
    "\n",
    "        Parameters\n",
    "        ----------\n",
    "        flavor : str (default: 'lattice')\n",
    "            The parsing method to use ('lattice' or 'stream').\n",
    "            Lattice is used by default.\n",
    "        suppress_stdout : str (default: False)\n",
    "            Suppress logs and warnings.\n",
    "        layout_kwargs : dict, optional (default: {})\n",
    "            A dict of `pdfminer.layout.LAParams <https://github.com/euske/pdfminer/blob/master/pdfminer/layout.py#L33>`_ kwargs.\n",
    "        kwargs : dict\n",
    "            See camelot.read_pdf kwargs.\n",
    "            body_font_size : 正文字体大小 The font size of the body text.\n",
    "\n",
    "        Returns\n",
    "        -------\n",
    "        tables : camelot.core.TableList\n",
    "            List of tables found in PDF.\n",
    "\n",
    "        \"\"\"\n",
    "        tables = []\n",
    "        titles = []\n",
    "        with TemporaryDirectory() as tempdir:\n",
    "            for p in self.pages:\n",
    "                self._save_page(self.filepath, p, tempdir)\n",
    "            pages = [os.path.join(tempdir, f\"page-{p}.pdf\") for p in self.pages]\n",
    "            parser = Lattice(**kwargs) if flavor == \"lattice\" else Stream(**kwargs)\n",
    "            for p in pages:\n",
    "                t = parser.extract_tables(\n",
    "                    p, suppress_stdout=suppress_stdout, layout_kwargs=layout_kwargs\n",
    "                )\n",
    "                t_list = self.extract_table_titles(parser, t, body_font_size = kwargs[\"body_font_size\"])\n",
    "                tables.extend(t)\n",
    "                titles.extend(t_list)\n",
    "        return TableList(sorted(tables)), titles\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def read_pdf_zhch(\n",
    "    filepath,\n",
    "    pages=\"1\",\n",
    "    password=None,\n",
    "    flavor=\"lattice\",\n",
    "    suppress_stdout=False,\n",
    "    layout_kwargs={},\n",
    "    **kwargs\n",
    "):\n",
    "    \"\"\"Read PDF and return extracted tables.\n",
    "\n",
    "    Note: kwargs annotated with ^ can only be used with flavor='stream'\n",
    "    and kwargs annotated with * can only be used with flavor='lattice'.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    filepath : str\n",
    "        Filepath or URL of the PDF file.\n",
    "    pages : str, optional (default: '1')\n",
    "        Comma-separated page numbers.\n",
    "        Example: '1,3,4' or '1,4-end' or 'all'.\n",
    "    password : str, optional (default: None)\n",
    "        Password for decryption.\n",
    "    flavor : str (default: 'lattice')\n",
    "        The parsing method to use ('lattice' or 'stream').\n",
    "        Lattice is used by default.\n",
    "    suppress_stdout : bool, optional (default: True)\n",
    "        Print all logs and warnings.\n",
    "    layout_kwargs : dict, optional (default: {})\n",
    "        A dict of `pdfminer.layout.LAParams <https://github.com/euske/pdfminer/blob/master/pdfminer/layout.py#L33>`_ kwargs.\n",
    "    table_areas : list, optional (default: None)\n",
    "        List of table area strings of the form x1,y1,x2,y2\n",
    "        where (x1, y1) -> left-top and (x2, y2) -> right-bottom\n",
    "        in PDF coordinate space.\n",
    "    columns^ : list, optional (default: None)\n",
    "        List of column x-coordinates strings where the coordinates\n",
    "        are comma-separated.\n",
    "    split_text : bool, optional (default: False)\n",
    "        Split text that spans across multiple cells.\n",
    "    flag_size : bool, optional (default: False)\n",
    "        Flag text based on font size. Useful to detect\n",
    "        super/subscripts. Adds <s></s> around flagged text.\n",
    "    strip_text : str, optional (default: '')\n",
    "        Characters that should be stripped from a string before\n",
    "        assigning it to a cell.\n",
    "    row_tol^ : int, optional (default: 2)\n",
    "        Tolerance parameter used to combine text vertically,\n",
    "        to generate rows.\n",
    "    column_tol^ : int, optional (default: 0)\n",
    "        Tolerance parameter used to combine text horizontally,\n",
    "        to generate columns.\n",
    "    process_background* : bool, optional (default: False)\n",
    "        Process background lines.\n",
    "    line_scale* : int, optional (default: 15)\n",
    "        Line size scaling factor. The larger the value the smaller\n",
    "        the detected lines. Making it very large will lead to text\n",
    "        being detected as lines.\n",
    "    copy_text* : list, optional (default: None)\n",
    "        {'h', 'v'}\n",
    "        Direction in which text in a spanning cell will be copied\n",
    "        over.\n",
    "    shift_text* : list, optional (default: ['l', 't'])\n",
    "        {'l', 'r', 't', 'b'}\n",
    "        Direction in which text in a spanning cell will flow.\n",
    "    line_tol* : int, optional (default: 2)\n",
    "        Tolerance parameter used to merge close vertical and horizontal\n",
    "        lines.\n",
    "    joint_tol* : int, optional (default: 2)\n",
    "        Tolerance parameter used to decide whether the detected lines\n",
    "        and points lie close to each other.\n",
    "    threshold_blocksize* : int, optional (default: 15)\n",
    "        Size of a pixel neighborhood that is used to calculate a\n",
    "        threshold value for the pixel: 3, 5, 7, and so on.\n",
    "\n",
    "        For more information, refer `OpenCV's adaptiveThreshold <https://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html#adaptivethreshold>`_.\n",
    "    threshold_constant* : int, optional (default: -2)\n",
    "        Constant subtracted from the mean or weighted mean.\n",
    "        Normally, it is positive but may be zero or negative as well.\n",
    "\n",
    "        For more information, refer `OpenCV's adaptiveThreshold <https://docs.opencv.org/2.4/modules/imgproc/doc/miscellaneous_transformations.html#adaptivethreshold>`_.\n",
    "    iterations* : int, optional (default: 0)\n",
    "        Number of times for erosion/dilation is applied.\n",
    "\n",
    "        For more information, refer `OpenCV's dilate <https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html#dilate>`_.\n",
    "    resolution* : int, optional (default: 300)\n",
    "        Resolution used for PDF to PNG conversion.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    tables : camelot.core.TableList\n",
    "\n",
    "    \"\"\"\n",
    "    if flavor not in [\"lattice\", \"stream\"]:\n",
    "        raise NotImplementedError(\n",
    "            \"Unknown flavor specified.\" \" Use either 'lattice' or 'stream'\"\n",
    "        )\n",
    "\n",
    "    with warnings.catch_warnings():\n",
    "        if suppress_stdout:\n",
    "            warnings.simplefilter(\"ignore\")\n",
    "\n",
    "        validate_input(kwargs, flavor=flavor)\n",
    "        p = PDFHandler_zhch(filepath, pages=pages, password=password)\n",
    "        kwargs = remove_extra(kwargs, flavor=flavor)\n",
    "        tables, titles = p.parse(\n",
    "            flavor=flavor,\n",
    "            suppress_stdout=suppress_stdout,\n",
    "            layout_kwargs=layout_kwargs,\n",
    "            **kwargs\n",
    "        )\n",
    "        return tables, titles"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "读取PDF文件，将表格转换为DataFrame，然后将DataFrame保存为Excel文件\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<TableList n=15>\n",
      "titles: [('合并资产负债表', 254.28104000000002), ('', 9999), ('', 9999), ('母公司资产负债表', 677.93104), ('', 9999), ('合并利润表', 720.2910400000001), ('', 9999), ('', 9999), ('母公司利润表', 622.49104), ('', 9999), ('合并现金流量表', 276.96103999999997), ('', 9999), ('', 9999), ('母公司现金流量表', 305.28103999999996), ('', 9999)]\n"
     ]
    }
   ],
   "source": [
    "import camelot\n",
    "import pandas as pd\n",
    "import os\n",
    "\n",
    "# 1.读取pdf\n",
    "# file = '../佳讯飞鸿：2022年年度报告/佳讯飞鸿：2022年年度报告_107-113.pdf'\n",
    "# file =  '../佳讯飞鸿：2022年年度报告/佳讯飞鸿：2022年年度报告.pdf'\n",
    "file =  '../年报/600916_中国黄金_2002年报.pdf'\n",
    "# 从带路径文件名中分离出文件名，且去掉后缀\n",
    "file_basename = file.split('/')[-1].split('.')[0]\n",
    "\n",
    "# update_app_path(\"gs10.03.0\", \"C:/Program Files/gs/gs10.03.0/lib\")\n",
    "# Path where i have gs(ghost script) installed\n",
    "# dll_dir = 'C:/Program Files/gs/gs10.03.0/lib'\n",
    "# Add the directory to the PATH variable\n",
    "# os.environ['PATH'] += ';' + dll_dir\n",
    "\n",
    "# tables = camelot.read_pdf(file, flavor='lattice', pages='all', backend=\"poppler\")\n",
    "# tables = camelot.read_pdf(file, flavor='lattice', pages='all')\n",
    "tables, titles = read_pdf_zhch(file, flavor='lattice', pages='83-94', body_font_size=9.0)\n",
    "print(tables)\n",
    "print(f\"titles: {titles}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Processed Table 0: (7, 4)\n",
      "Table 0: 合并到上一个表格df：(48, 4)\n",
      "Table 0: 合并后df：(55, 4)\n",
      "Table 0: 合并到上一个表格df：(45, 4)\n",
      "Table 0: 合并后df：(100, 4)\n",
      "Processed Table 3: (37, 4)\n",
      "Table 3: 合并到上一个表格df：(45, 4)\n",
      "Table 3: 合并后df：(82, 4)\n",
      "Processed Table 5: (32, 4)\n",
      "Table 5: 合并到上一个表格df：(29, 4)\n",
      "Table 5: 合并后df：(61, 4)\n",
      "Processed Table 7: (3, 1)\n",
      "Processed Table 8: (24, 4)\n",
      "Table 8: 合并到上一个表格df：(20, 4)\n",
      "Table 8: 合并后df：(44, 4)\n",
      "Processed Table 10: (6, 4)\n",
      "Table 10: 合并到上一个表格df：(33, 4)\n",
      "Table 10: 合并后df：(39, 4)\n",
      "Table 10: 合并到上一个表格df：(18, 4)\n",
      "Table 10: 合并后df：(57, 4)\n",
      "Processed Table 13: (8, 4)\n",
      "Table 13: 合并到上一个表格df：(30, 4)\n",
      "Table 13: 合并后df：(38, 4)\n",
      "export sheet_name: 合并资产负债表-Page83\n",
      "export sheet_name: 母公司资产负债表-Page86\n",
      "export sheet_name: 合并利润表-Page88\n",
      "export sheet_name: 合并利润表-1-Page90\n",
      "export sheet_name: 母公司利润表-Page90\n",
      "export sheet_name: 合并现金流量表-Page91\n",
      "export sheet_name: 母公司现金流量表-Page93\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import os\n",
    "\n",
    "# 对于提取的每个表格，处理多行表头\n",
    "report_list = []\n",
    "for table_number, table in enumerate(tables, start=0):\n",
    "    # 将表格转换为DataFrame\n",
    "    df = table.df\n",
    "    if df.empty:\n",
    "        continue\n",
    "    # extract_df = extract_table(\"table-\" + str(table_number+1) + \"-\" + titles[table_number][0], df, report_list)\n",
    "    # table_number 不等于 table.page，因为一页有可能有多张报表\n",
    "    extract_table(table_number, table.page, df, report_list)\n",
    "  \n",
    "\"\"\"\n",
    "由于report_list中的DataFrame可能有中间的临时结果（report_list.pop()）,所以要都处理完再输出excel文件\n",
    "将report_list导出为excel文件, 创建一个ExcelWriter对象，指定要写入的excel文件名\n",
    "\"\"\"\n",
    "sheet_name_pre = \"\"\n",
    "sheet_name_null_no = 0\n",
    "sheet_names = []\n",
    "os.makedirs(\"./output\", exist_ok=True)\n",
    "with pd.ExcelWriter(\"./output/\" + file_basename+\".xlsx\") as writer:\n",
    "    for i, sheet_info in enumerate(report_list, start=0):\n",
    "        df = sheet_info.sheet_data\n",
    "        # 设置报表表头，如果报表表头为空，使用上一张报表表头\n",
    "        if titles[sheet_info.table_idx][0] == \"\":\n",
    "            sheet_name_null_no += 1\n",
    "            sheet_names.append(sheet_name_pre + \"-\" + str(sheet_name_null_no) + \"-Page\" + str(sheet_info.page_no)) \n",
    "        else:\n",
    "            sheet_name_pre = titles[sheet_info.table_idx][0] \n",
    "            sheet_name_null_no = 0\n",
    "            sheet_names.append(sheet_name_pre + \"-Page\" + str(sheet_info.page_no))\n",
    "        df.to_excel(writer, sheet_name=sheet_names[i], index=False)\n",
    "        # html_table = df.to_html()\n",
    "        # markdown_str = df.to_markdown()\n",
    "        # 写入到文件\n",
    "        # with open(sheet_names[i] + '.md', 'w') as f:\n",
    "            # f.write(markdown_str)\n",
    "        print(f\"export sheet_name: {sheet_names[i]}\")\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
