import os
import re
import logging
import fitz  # PyMuPDF pip install PyMuPDF -i https://pypi.tuna.tsinghua.edu.cn/simple
import pandas as pd

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Show all columns when printing DataFrames
pd.set_option('display.max_columns', None)
# Show all rows when printing DataFrames
pd.set_option('display.max_rows', None)
# Do not wrap wide frames onto multiple lines
pd.set_option('expand_frame_repr', False)


def get_to_df(path):
    """Recursively scan *path* for PDF files, extract every hyperlink
    (together with the text covering the link rectangle) and save the
    result to ``pdf_content.xlsx``.

    Links whose URI contains "weixin" are skipped.  Output columns:
    名称 (file name without extension), 链接 (URI), 路径 (directory),
    链接文字 (anchor text).
    """
    # ANSI escape codes used to colorize the progress log.
    BRIGHT_GREEN = '\033[92m'
    BRIGHT_YELLOW = '\033[93m'
    BRIGHT_BLUE = '\033[94m'
    BOLD = '\033[1m'
    RESET = '\033[0m'

    # Accumulate plain dicts and build the DataFrame once at the end:
    # appending to a DataFrame row-by-row is O(n^2) and relied on the
    # private `_append` API.
    rows = []

    for root, _, files in os.walk(path):
        for f in files:
            # Case-insensitive match so ".PDF" files are not skipped.
            if not f.lower().endswith(".pdf"):
                continue

            pdf_path = os.path.join(root, f)
            logging.info(f"{RESET}{BOLD}{BRIGHT_YELLOW}正在处理文件：{f}{RESET}")

            # splitext strips only the trailing extension, unlike
            # replace(".pdf", "") which would mangle names containing
            # ".pdf" in the middle.
            name = os.path.splitext(f)[0]

            # Context manager guarantees the document is closed even if
            # a page fails to parse (the original leaked one handle per
            # file).
            with fitz.open(pdf_path) as doc:
                for page_num in range(len(doc)):
                    page = doc.load_page(page_num)
                    link_annot = page.first_link
                    while link_annot:
                        if link_annot.uri and "weixin" not in link_annot.uri:
                            # Text under the link rectangle = anchor text.
                            link_text = page.get_textbox(link_annot.rect).strip()
                            rows.append({
                                '名称': name,
                                '链接': link_annot.uri,
                                '路径': root,
                                '链接文字': link_text,
                            })
                            logging.info(f"{BOLD}{BRIGHT_BLUE}地址：{pdf_path}{RESET}")
                            logging.info(f"{BOLD}{BRIGHT_GREEN}链接文字：{link_text}{RESET}")
                            logging.info(f"{BOLD}{BRIGHT_GREEN}链接：{link_annot.uri}{RESET}")

                        link_annot = link_annot.next

    df = pd.DataFrame(rows, columns=['名称', '链接', '路径', '链接文字'])
    df.to_excel("pdf_content.xlsx", index=False)


def tissue_df(input_path="pdf_content.xlsx", output_path="filtered_pdf_content.xlsx"):
    """Filter the extracted-link sheet, keeping only rows whose 链接
    (link) column mentions a known document/knowledge platform, and
    write the result to *output_path*.

    Parameters
    ----------
    input_path : str
        Excel file produced by ``get_to_df``.  Defaults preserve the
        original hard-coded behavior, so ``tissue_df()`` still works.
    output_path : str
        Destination Excel file for the filtered rows.
    """
    df = pd.read_excel(input_path)

    # Platforms whose links are worth keeping.
    save_link_content = ["feishu", "zhihu", "shimo", "yuque", "docs", "notion", "github"]
    # re.escape keeps the match literal even if a keyword ever gains
    # regex metacharacters; NaN links are treated as non-matching.
    pattern = '|'.join(map(re.escape, save_link_content))
    df = df[df['链接'].str.contains(pattern, case=False, na=False)]
    df.to_excel(output_path, index=False)


def start():
    """Entry point: run the post-processing/filtering step.

    The extraction step is commented out; re-enable it to rebuild
    pdf_content.xlsx from the source directory before filtering.
    """
    # get_to_df("Z:\\data\\生财有术【包括第8期】每天更新")
    tissue_df()


# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    start()
