import pdfplumber
import re
from collections import defaultdict


def extract_title(pdf_path):
    """Extract the most likely document title from the first pages of a PDF.

    Scans up to the first three pages, merges extracted words into visual
    lines, filters out boilerplate (author/abstract/contest headers, bare
    years, ...), scores the surviving candidates, and returns the cleaned
    best candidate.

    Args:
        pdf_path: Path to the PDF file to inspect.

    Returns:
        The cleaned title string, or "未找到有效标题" when no candidate
        survives filtering and cleanup.
    """
    domain_keywords = {
        "直肠癌", "淋巴结转移", "智能诊断", "CT影像", "肿瘤分割",
        "特征提取", "分类模型", "Dice系数", "随机森林", "Mask RCNN",
    }

    # Title pattern (with line-break handling): the candidate must contain
    # at least one CJK character and may span up to a couple of wrapped
    # lines of CJK/Latin/digit text with common Chinese punctuation.
    title_pattern = re.compile(
        r"^(?=.*[\u4e00-\u9fa5])"  # must contain a CJK character
        r"[\u4e00-\u9fa5A-Za-z0-9\s\-（）：“”]+"  # CJK punctuation allowed
        r"(?:[\n\u0085\u000A]{1,2}[\u4e00-\u9fa5A-Za-z0-9\s\-（）：“”]+)*$",
        re.DOTALL | re.UNICODE,
    )

    # Boilerplate filter (includes contest-related keywords): drops lines
    # starting with author/affiliation/date/abstract/etc. markers, blank
    # lines, and lines starting with a 4-digit year.
    filter_pattern = re.compile(
        r"^(?:作\s*者|单\s*位|日\s*期|摘\s*要|关\s*键\s*词|目\s*录|"
        r"版\s*权|页\s*码|图\s*注|表\s*注|泰\s*迪\s*杯|挑\s*战\s*赛|"
        r"数\s*据\s*挖\s*掘)|^\s*$|^[0-9]{4}年",
        re.IGNORECASE | re.MULTILINE,
    )

    def merge_title_blocks(page):
        """Group a page's words into lines, then merge likely wrapped titles.

        Words whose vertical position differs by more than 5 units start a
        new line. Two adjacent lines are merged when the second starts with
        a CJK/Latin character and the merged text looks like a valid title
        (8-50 chars matching ``title_pattern``).
        """
        words = page.extract_words(x_tolerance=2, y_tolerance=2)
        if not words:
            return ""

        lines = []
        current_line = []
        prev_y = words[0]['top']
        for word in words:
            if abs(word['top'] - prev_y) > 5:
                lines.append(' '.join(current_line).strip())
                current_line = []
                prev_y = word['top']
            current_line.append(word['text'])
        lines.append(' '.join(current_line).strip())

        merged = []
        i = 0
        while i < len(lines):
            # Strip trailing connector punctuation before trying to merge.
            line = lines[i].rstrip("：-—")
            if i + 1 < len(lines) and re.match(r"^[\u4e00-\u9fa5A-Za-z]", lines[i + 1]):
                merged_line = f"{line} {lines[i + 1]}"
                if 8 <= len(merged_line) <= 50 and title_pattern.match(merged_line):
                    merged.append(merged_line)
                    i += 2
                    continue
            merged.append(line)
            i += 1
        return '\n'.join(merged)

    def score_candidate(candidate, page_num):
        """Heuristic title score: page position, length, keywords, breaks."""
        score = 0
        if page_num == 1:
            score += 7  # titles are most likely on the first page

        length = len(candidate)
        if 18 <= length <= 32:
            score += 8
        elif 12 <= length <= 40:
            score += 4

        words = re.findall(r"[\u4e00-\u9fa5A-Za-z0-9]+", candidate)
        common_words = domain_keywords & set(words)
        score += len(common_words) * 3
        if re.search(r"(诊断|预测|分割|模型)", candidate):
            score += 5

        # One wrapped line is typical for a long title; more is suspicious.
        line_breaks = candidate.count('\n')
        if line_breaks == 1:
            score += 5
        elif line_breaks > 1:
            score -= 3

        return score

    with pdfplumber.open(pdf_path) as pdf:
        max_pages = min(3, len(pdf.pages))
        candidates = []
        for page_num in range(1, max_pages + 1):
            page = pdf.pages[page_num - 1]
            if not page:
                continue

            merged_text = merge_title_blocks(page)
            for line in merged_text.split('\n'):
                line = line.strip()

                # Contest-prefix cleanup: a line mentioning the contest name
                # is kept only when it also contains a domain keyword.
                if "泰迪杯" in line:
                    title_words = re.findall(r"[\u4e00-\u9fa5]+", line)
                    if not (domain_keywords & set(title_words)):
                        continue

                # Standard filtering and title validation.
                if not line or filter_pattern.search(line):
                    continue
                if title_pattern.match(line):
                    candidates.append((page_num, line))

    if not candidates:
        return "未找到有效标题"
    # Prefer earlier pages, then higher score, then longer candidates.
    candidates.sort(key=lambda x: (x[0], -score_candidate(x[1], x[0]), -len(x[1])))
    seen = set()
    for _, candidate in candidates:
        # Remove stray single letters and markers like "题" / "摘要" that
        # bleed into the extracted title text.
        candidate_clean = re.sub(
            r'(?:^|[\s\u3000]+)(题|摘\s*要|[A-Z])(?:$|[\s\u3000]+)|(?<=\s)[A-Z](?=\s)',
            '',
            candidate,
            flags=re.UNICODE,
        ).strip()
        # BUGFIX: the original never populated `seen` and could return an
        # empty string when cleanup removed everything; skip empty or
        # already-seen cleaned candidates and fall through to the next one.
        if candidate_clean and candidate_clean not in seen:
            return candidate_clean
        seen.add(candidate_clean)
    return "未找到有效标题"


# --- Manual smoke test (path is project-specific) ---
if __name__ == "__main__":
    # Guarded so that importing this module does not trigger file I/O.
    pdf_path = "附件3/B5978.pdf"
    title = extract_title(pdf_path)
    print("【赛事前缀清洗后结果】")
    print(title)