# -*- coding:utf-8 -*-

import pdfplumber
import json
import re
import os
import logging
from collections import defaultdict

# Suppress pdfplumber's CropBox warnings
logging.getLogger("pdfplumber").setLevel(logging.ERROR)


def extract_pdf_metadata(pdf_path):
    """Derive the output .txt file name from a PDF path.

    If the base name follows the annual-report naming convention
    ``<date>__<company>__<stock code>__...__<year>年``, a normalized
    name is assembled from the captured fields; otherwise the original
    base name is returned with its extension swapped for ``.txt``.
    """
    base = os.path.basename(pdf_path)
    hit = re.search(r'(\d{4}-\d{2}-\d{2})__(.*?)__(\d+)__.*?__(\d{4})年', base)
    if hit is None:
        # Fallback: keep the original stem, just change the extension.
        return os.path.splitext(base)[0] + ".txt"
    date, company, stock_code, year = hit.groups()
    return f"{date}__{company}__{stock_code}__年报__{year}年__年度报告.txt"


def classify_text(text, page_height, y):
    """Classify one text line by its vertical position and content.

    Lines within 50 units of the top or bottom edge become header/footer;
    applicability tick-box lines and bracket/quote-bearing lines (treated
    as table rows) are tagged next; everything else is plain body text.
    """
    if y < 50:
        return "页眉"  # header band at the top of the page
    if y > page_height - 50:
        return "页脚"  # footer band at the bottom of the page
    if "□适用" in text or "√适用" in text:
        return "checkbox"  # applicability tick-box line
    for marker in ("[", "]", "'"):
        if marker in text:
            return "excel"  # bracket/quote characters suggest tabular data
    return "text"


def process_pdf(pdf_path):
    """Convert a PDF into a tagged JSON-lines transcript.

    Every line of every page is extracted with its coordinates, classified
    via classify_text(), and serialized as one JSON object, wrapped in
    <ATTACHMENT_FILE>/<FILE_CONTENT> markers.

    Args:
        pdf_path: Path to the source PDF file.

    Returns:
        The complete transcript as a single newline-joined string.
    """
    output = []
    allrow_counter = 0  # running line index across ALL pages
    page_texts = defaultdict(list)

    with pdfplumber.open(pdf_path) as pdf:
        for page_num, page in enumerate(pdf.pages, 1):
            page_height = page.height

            # Crop explicitly to the MediaBox so pages with a missing or
            # malformed CropBox are still processed.
            cropped_page = page.crop(page.mediabox)

            # Extract words with their positions.
            words = cropped_page.extract_words(keep_blank_chars=True, x_tolerance=3)

            # Group words into visual lines keyed by integer top coordinate.
            lines = defaultdict(list)
            for word in words:
                lines[int(word['top'])].append((word['x0'], word['text']))

            # Emit one record per line, top to bottom.
            # (Renamed from the original's `words`, which shadowed the
            # word list extracted above.)
            for y, line_words in sorted(lines.items()):
                ordered = sorted(line_words, key=lambda w: w[0])
                line_text = " ".join(text for _, text in ordered)
                text_type = classify_text(line_text, page_height, y)

                # Table-like lines are re-serialized as a JSON cell list.
                if text_type == "excel":
                    line_text = json.dumps([text for _, text in ordered],
                                           ensure_ascii=False)

                page_texts[page_num].append({
                    "page": page_num,
                    "allrow": allrow_counter,
                    "type": text_type,
                    "inside": line_text.strip()
                })
                allrow_counter += 1

    # Assemble the final tagged output.
    file_name = extract_pdf_metadata(pdf_path)
    output.append("<ATTACHMENT_FILE>")
    output.append("<FILE_INDEX>File 1</FILE_INDEX>")
    output.append(f"<FILE_NAME>{file_name}</FILE_NAME>")
    output.append("<FILE_CONTENT>")

    for page in sorted(page_texts):
        for item in page_texts[page]:
            output.append(json.dumps(item, ensure_ascii=False))

    output.append("</FILE_CONTENT>")
    output.append("</ATTACHMENT_FILE>")

    return "\n".join(output)


# Usage example
if __name__ == "__main__":
    # Hard-coded input path for the example run.
    pdf_file = "/Users/simon/Code/Git/financial-large-model/data/annual report/pdf/2012_600288_大恒科技.pdf"
    result = process_pdf(pdf_file)

    # Save the result.
    # NOTE(review): .replace('pdf', 'txt') rewrites EVERY occurrence of
    # "pdf" in the extension-stripped path — presumably intended to swap
    # the .../pdf/ directory for .../txt/, but it would also corrupt any
    # file name that itself contains "pdf". Confirm the target layout.
    output_file = os.path.splitext(pdf_file)[0].replace('pdf','txt') + ".txt"
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(result)
    print(f"转换完成! 结果已保存至: {output_file}")

