import pdfplumber
import re
from collections import defaultdict
import pytesseract
from PIL import Image, ImageEnhance
import pdf2image
import cv2
import numpy as np


# --- 新增：OCR预处理函数 ---
def preprocess_image(image):
    """Preprocess a scanned page image for OCR (binarization + denoising).

    Args:
        image: PIL.Image page render in any mode (RGB, L, RGBA, ...).

    Returns:
        PIL.Image: binarized, median-filtered grayscale image ready for OCR.
    """
    # Normalize to RGB first: cv2.COLOR_RGB2GRAY requires a 3-channel array,
    # so a grayscale ('L') or RGBA input would otherwise raise in cvtColor.
    rgb = np.array(image.convert("RGB"))
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    # Adaptive Gaussian thresholding copes with uneven scan illumination
    # better than a single global threshold.
    thresh = cv2.adaptiveThreshold(
        gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2
    )
    # 3x3 median filter removes salt-and-pepper scanner noise.
    denoised = cv2.medianBlur(thresh, 3)
    return Image.fromarray(denoised)


# --- 增强版OCR函数（含路径配置） ---
def ocr_pdf(pdf_path, poppler_path=r"E:\poppler\poppler\poppler-24.08.0\Library\bin"):
    """OCR every page of a (possibly scanned) PDF into plain text.

    Renders the PDF to images with poppler, preprocesses each page, and runs
    Tesseract with mixed Chinese+English recognition.

    Args:
        pdf_path: path to the PDF file.
        poppler_path: directory containing the poppler binaries (explicit
            path avoids PATH/permission issues on Windows).

    Returns:
        str: concatenated page texts, one trailing newline per page; empty
        string when the PDF cannot be rendered.
    """
    try:
        images = pdf2image.convert_from_path(
            pdf_path,
            poppler_path=poppler_path,
            dpi=300  # higher DPI improves OCR accuracy
        )
    except Exception as e:
        print(f"PDF转图片失败: {str(e)}")
        return ""

    page_texts = []
    for img in images:
        processed = preprocess_image(img)
        # BUG FIX: the previous config string included "-l chi_sim", which
        # overrode lang="chi_sim+eng" and silently dropped English OCR.
        # --psm 6: assume a single uniform block of text (titles/paragraphs).
        text = pytesseract.image_to_string(
            processed,
            lang="chi_sim+eng",
            config="--psm 6"
        )
        page_texts.append(text)
    # str.join avoids quadratic "+="-style concatenation over many pages.
    return "".join(t + "\n" for t in page_texts)


def extract_title(pdf_path):
    """Extract the most likely document title from the first pages of a PDF.

    Strategy: prefer the embedded text layer (pdfplumber); fall back to OCR
    for scanned PDFs. Candidate lines are filtered by regex rules, scored by
    a domain-keyword heuristic, and the best candidate is cleaned and
    returned.

    Args:
        pdf_path: path to the PDF file.

    Returns:
        str: the cleaned title, or a Chinese failure message when no
        candidate survives filtering.
    """
    domain_keywords = {
        "直肠癌", "淋巴结转移", "智能诊断", "CT影像", "肿瘤分割",
        "特征提取", "分类模型", "Dice系数", "随机森林", "Mask RCNN"
    }

    # Title regex: must contain at least one CJK character; allows Latin,
    # digits, and common Chinese punctuation, optionally spanning 1-2 breaks.
    title_pattern = re.compile(
        r"^(?=.*[\u4e00-\u9fa5])"  # must contain Chinese characters
        r"[\u4e00-\u9fa5A-Za-z0-9\s\-（）：“”]+"  # extended CJK punctuation support
        r"(?:[\n\u0085\u000A]{1,2}[\u4e00-\u9fa5A-Za-z0-9\s\-（）：“”]+)*$",
        re.DOTALL | re.UNICODE
    )

    # Filter: author/abstract/keyword headers, competition boilerplate,
    # blank lines, and lines starting with a 4-digit year.
    filter_pattern = re.compile(
        r"^(?:作\s*者|单\s*位|日\s*期|摘\s*要|关\s*键\s*词|目\s*录|"
        r"版\s*权|页\s*码|图\s*注|表\s*注|泰\s*迪\s*杯|挑\s*战\s*赛|"
        r"数\s*据\s*挖\s*掘)|^\s*$|^[0-9]{4}年",
        re.IGNORECASE | re.MULTILINE
    )

    def merge_title_blocks(page):
        """Reassemble a page's words into lines and merge likely title wraps."""
        words = page.extract_words(x_tolerance=2, y_tolerance=2)
        if not words:
            return ""

        # Group words into visual lines: a vertical jump > 5pt starts a line.
        lines = []
        current_line = []
        prev_y = words[0]['top']
        for word in words:
            if abs(word['top'] - prev_y) > 5:
                lines.append(' '.join(current_line).strip())
                current_line = []
                prev_y = word['top']
            current_line.append(word['text'])
        lines.append(' '.join(current_line).strip())

        # Merge a wrapped title with its continuation when the combined line
        # still looks like a plausible title (length 8-50 and pattern match).
        merged = []
        i = 0
        while i < len(lines):
            line = lines[i].rstrip("：-—")
            if i + 1 < len(lines) and re.match(r"^[\u4e00-\u9fa5A-Za-z]", lines[i + 1]):
                merged_line = f"{line} {lines[i + 1]}"
                if 8 <= len(merged_line) <= 50 and title_pattern.match(merged_line):
                    merged.append(merged_line)
                    i += 2
                    continue
            merged.append(line)
            i += 1
        return '\n'.join(merged)

    def score_candidate(candidate, page_num):
        """Heuristic score: page position, length, domain terms, line breaks."""
        score = 0
        if page_num == 1:
            score += 7  # titles overwhelmingly live on page 1

        length = len(candidate)
        if 18 <= length <= 32:
            score += 8
        elif 12 <= length <= 40:
            score += 4

        words = re.findall(r"[\u4e00-\u9fa5A-Za-z0-9]+", candidate)
        common_words = domain_keywords & set(words)
        score += len(common_words) * 3
        if re.search(r"(诊断|预测|分割|模型)", candidate):
            score += 5

        # One wrap is typical for long titles; more suggests body text.
        line_breaks = candidate.count('\n')
        if line_breaks == 1:
            score += 5
        elif line_breaks > 1:
            score -= 3

        return score

    # PERF FIX: cache the OCR output — the previous version re-OCRed the
    # ENTIRE document once per text-less page (up to 3 full OCR passes).
    ocr_cache = None

    def get_ocr_text():
        nonlocal ocr_cache
        if ocr_cache is None:
            ocr_cache = ocr_pdf(pdf_path)
        return ocr_cache

    with pdfplumber.open(pdf_path) as pdf:
        max_pages = min(3, len(pdf.pages))
        candidates = []

        for page_num in range(1, max_pages + 1):
            page = pdf.pages[page_num - 1]

            # Smart fallback: use the text layer when present, OCR otherwise.
            text_mode = True
            merged_text = ""
            try:
                merged_text = merge_title_blocks(page)
                if not merged_text:
                    raise ValueError("无文字内容")
            except Exception:  # was a bare except: also caught SystemExit etc.
                text_mode = False

            if text_mode:
                lines = merged_text.split('\n')
            else:
                # Scanned PDF: titles sit near the top, so keep 50 lines.
                lines = get_ocr_text().split('\n')[:50]

            for line in lines:
                # Collapse full-width spaces / thin-space artifacts from OCR.
                line = re.sub(
                    r"[\u3000\xa0\u2002-\u2005]+",  # full-width spaces / control chars
                    " ",
                    line.strip()
                ).strip()

                if not line or filter_pattern.search(line):
                    continue

                # Competition-prefix cleanup: drop "泰迪杯" lines unless they
                # also carry a domain keyword (then they may embed the title).
                if "泰迪杯" in line:
                    title_words = re.findall(r"[\u4e00-\u9fa5]+", line)
                    if not (domain_keywords & set(title_words)):
                        continue

                if title_pattern.match(line):
                    candidates.append((page_num, line))

    if not candidates:
        return "未找到有效标题（尝试OCR后仍失败）"
    # Sort by page (ascending), then score (descending), then length.
    candidates.sort(key=lambda x: (x[0], -score_candidate(x[1], x[0]), -len(x[1])))
    seen = set()
    for _, candidate in candidates:
        candidate_clean = re.sub(
            r"(?<=[：:])\s*[A-Z]题\s*", "",  # strip problem-letter prefix
            candidate
        ).strip()
        if candidate_clean and candidate_clean not in seen:
            return candidate_clean
        seen.add(candidate_clean)  # was never populated before (dead dedup)
    return "未找到有效标题"


# --- 测试与调试 ---
# --- Manual test / debug entry point ---
if __name__ == "__main__":
    pdf_path = "附件3/B3108.pdf"  # path to a scanned-style PDF sample
    try:
        result = extract_title(pdf_path)
    except Exception as err:
        print(f"错误: {str(err)}")
        # On failure, dump the head of the raw OCR text for debugging.
        print("\n【OCR原始文本（前200字）】")
        print(ocr_pdf(pdf_path)[:200] + "...")
    else:
        print("【最终提取结果】")
        print(result)