import pdfplumber
import re
from collections import defaultdict  # 新增统计容器


def extract_abstracts(pdf_path):
    """Extract the Chinese and English abstracts from a thesis PDF.

    Pages are scanned in order: the Chinese abstract (opened by "摘要")
    is located first, then the English one (opened by "Abstract" or
    "Summary"). Each abstract is collected line by line until a stop
    marker (keywords line or a chapter/section heading) is seen, and may
    span multiple pages.

    Args:
        pdf_path: path to the PDF file.

    Returns:
        dict keyed by "chinese"/"english" (only languages that were
        found appear), each value a dict with "text", "start_page",
        "end_page", "char_count" (CJK chars, Chinese), "word_count"
        (whitespace words, English) and "pages".
    """
    abstract_patterns = {
        "chinese": {
            "start": re.compile(r"摘\s*要"),
            "stop": re.compile(
                r"关键词|Key\s*words|"
                r"ABSTRACT|Abstract|Summary|"
                # Headings must be unindented so indented body lines do
                # not terminate the abstract prematurely.
                r"^(?!\s)(?:第\s*(?:\d+|[\u4e00-\u9fa5]+)\s*(?:章|节|部分)|"
                + r"[一二三四五六七八九十零佰仟拾廿卅]+(?:\.\d+)?\s*[、．]\s+\S+)"
            )
        },
        "english": {
            # Match both "Abstract" and "Summary", case-insensitively.
            "start": re.compile(r"(Abstract|Summary)", re.IGNORECASE),
            "stop": re.compile(r"Key\s*words|Keywords|^Chapter\s+\d+")
        }
    }

    # Per-language result records; defaultdict so fields can be set lazily.
    abstracts = defaultdict(lambda: {
        "text": "",
        "start_page": 0,
        "end_page": 0,
        "char_count": 0,
        "word_count": 0
    })
    current_abstract = None  # language currently being collected, or None
    found_chinese = False    # has the Chinese abstract been located yet?
    page_buffer = []         # collected abstract lines (may span pages)
    char_counter = 0         # Chinese character tally
    word_counter = 0         # English word tally
    # BUGFIX: initialize so the post-loop fallback cannot hit an unbound
    # name when the PDF has no pages.
    page_num = 0

    with pdfplumber.open(pdf_path) as pdf:
        for page in pdf.pages:
            page_num = page.page_number
            # BUGFIX: extract_text() returns None for image-only pages;
            # the original crashed on .split in that case.
            text = page.extract_text() or ""
            lines = [line.strip() for line in text.split('\n') if line.strip()]

            for line in lines:
                # --- 1. Chinese abstract ---
                if not current_abstract and not found_chinese:
                    start_match = abstract_patterns["chinese"]["start"].search(line)
                    if start_match:
                        current_abstract = "chinese"
                        found_chinese = True
                        abstracts["chinese"]["start_page"] = page_num
                        # Keep whatever follows the "摘要" marker on this line.
                        page_buffer.append(line[start_match.end():])
                    continue

                if current_abstract == "chinese":
                    if abstract_patterns["chinese"]["stop"].search(line):
                        # Stop marker reached: finalize text and statistics.
                        abstracts["chinese"]["text"] = ' '.join(page_buffer).strip()
                        abstracts["chinese"]["end_page"] = page_num
                        abstracts["chinese"]["char_count"] = char_counter
                        page_buffer.clear()
                        char_counter = 0
                        current_abstract = None
                    else:
                        # Count only CJK characters.
                        char_counter += len(re.findall(r'[\u4e00-\u9fa5]', line))
                        page_buffer.append(line)
                    continue

                # --- 2. English abstract (searched only after the Chinese one) ---
                if not current_abstract and found_chinese:
                    start_match = abstract_patterns["english"]["start"].search(line)
                    if start_match:
                        current_abstract = "english"
                        abstracts["english"]["start_page"] = page_num
                        lang_type = start_match.group(1).lower()
                        page_buffer.append(f"({lang_type}) {line[start_match.end():]}")
                    continue

                if current_abstract == "english":
                    if abstract_patterns["english"]["stop"].search(line):
                        abstracts["english"]["text"] = ' '.join(page_buffer).strip()
                        abstracts["english"]["end_page"] = page_num
                        abstracts["english"]["word_count"] = word_counter
                        page_buffer.clear()
                        word_counter = 0
                        current_abstract = None
                    else:
                        word_counter += len(line.split())  # whitespace-separated words
                        page_buffer.append(line)
                    continue

    # Handle an abstract that never hit a stop marker (runs to the last page).
    if current_abstract is not None:
        lang = current_abstract
        abstracts[lang]["end_page"] = page_num
        # BUGFIX: the original wrote the tally to a bogus f"{lang}_count"
        # key and joined a buffer holding only the first line; write to
        # the real count field and join the full line buffer.
        if lang == "chinese":
            abstracts[lang]["char_count"] = char_counter
        else:
            abstracts[lang]["word_count"] = word_counter
        abstracts[lang]["text"] = ' '.join(page_buffer).strip()

    # Page span for each abstract (at least 1 page when found).
    for lang in abstracts:
        if abstracts[lang]["start_page"]:
            abstracts[lang]["pages"] = max(
                1, abstracts[lang]["end_page"] - abstracts[lang]["start_page"] + 1)
        else:
            abstracts[lang]["pages"] = 0  # abstract not found

    return dict(abstracts)  # plain dict for callers

def getContentPages(pdf_path):
    """Print a summary of the abstracts extracted from *pdf_path*.

    Shows the page range, character/word counts and a short content
    preview for the Chinese and English abstracts.
    """
    result = extract_abstracts(pdf_path)

    # BUGFIX: the original raised KeyError when the PDF lacked one of
    # the abstracts; guard each section explicitly.
    zh = result.get("chinese")
    print("【中文摘要统计】")
    if zh and zh["start_page"]:
        print(f"页码：P{zh['start_page']}-P{zh['end_page']}（{zh['pages']}页）")
        print(f"中文字符：{zh['char_count']}个")
        print("内容：\n" + zh['text'][:200] + "...")  # first 200 chars only
    else:
        print("未找到中文摘要")

    en = result.get("english")
    print("\n【英文摘要统计】")
    if en and en["start_page"]:
        print(f"页码：P{en['start_page']}-P{en['end_page']}（{en['pages']}页）")
        print(f"英文单词：{en['word_count']}个")
        print("内容：\n" + en['text'][:100] + "...")  # first 100 chars only
    else:
        print("未找到英文摘要")

if __name__ == '__main__':
    # Demo entry point: summarize the abstracts of a sample thesis PDF.
    # NOTE(review): hard-coded relative path — assumes the script is run
    # from its own directory; confirm the sample file exists.
    getContentPages("../附件3/B1699.pdf")

