import os
import re
import time
import argparse
import json
from dataclasses import dataclass
from typing import List, Dict, Any
from datetime import datetime
import paddle
from paddleocr import PaddleOCR
from pdf2image import convert_from_path
# New: image-enhancement dependencies (for handling blurry text)
try:
    import cv2
    import numpy as np
except ImportError:
    raise ImportError("需安装图像增强依赖：pip install opencv-python numpy -i https://pypi.tuna.tsinghua.edu.cn/simple")


# -------------------------- 1. Config: add detection thresholds + raise DPI --------------------------
@dataclass
class Config:
    """PDF OCR configuration (PaddleOCR 3.2.0, tuned to reduce missed characters)."""
    # --- OCR core settings: detection thresholds added to catch small/blurry text ---
    ocr_model: str = "ch"  # PaddleOCR language pack (Chinese)
    text_det_limit_side_len: int = 2000  # was 1500 -> 2000 (captures smaller text)
    text_det_limit_type: str = "min"  # interpret the limit as the image's shorter side
    use_textline_orientation: bool = True  # classify/rotate text lines before recognition
    enable_mkldnn: bool = True  # oneDNN (MKL-DNN) CPU acceleration
    cpu_threads: int = 4  # inference threads; also reused as pdf2image thread_count
    # Detection thresholds loosened to reduce missed detections
    text_det_db_thresh: float = 0.2      # DB binarization threshold (default 0.3 -> 0.2)
    text_det_db_box_thresh: float = 0.1  # box-filter score threshold (default 0.5 -> 0.1)
    text_det_db_unclip_ratio: float = 1.8  # box expansion (default 1.5 -> 1.8, avoids too-tight boxes)

    # --- Paths ---
    pdf_path: str = "/opt/test/files/ai_city_whitepaper.pdf"  # source PDF
    save_dir: str = "/opt/test/ocr_results"  # where .txt/.json results land
    temp_image_dir: str = "/tmp/temp_pdf_imgs"  # per-page jpgs, deleted after the run

    # --- Text processing: DPI raised to sharpen blurry text ---
    dpi: int = 300  # was 200 -> 300 (sharper page renders)
    chunk_size: int = 300  # target characters per chunk
    chunk_overlap: int = 50  # characters shared between consecutive chunks
    min_chunk_len: int = 50  # chunks shorter than this are dropped


# -------------------------- 2. Core processor: image enhancement + threshold tuning + line-break merging --------------------------
class PDF2TXTProcessor:
    """PDF -> images -> OCR -> cleaned/chunked text pipeline (CPU-only).

    Workflow: pdf_to_images -> ocr_image_batch -> text_clean_and_chunk ->
    save_results, with temp-image cleanup on both success and failure paths.
    """

    def __init__(self, config: Config):
        self.config = config
        self._pre_check()      # fail fast on versions/deps/paths
        self._init_device()    # pin Paddle to CPU
        self.ocr = self._init_ocr()

    def _pre_check(self):
        """Validate library versions, runtime dependencies and paths.

        Raises:
            RuntimeError: on any failure, wrapping the cause plus a
                copy-paste fix command.
        """
        # Bound OUTSIDE the try so the except-branch fix command can never
        # hit a NameError (original bound them inside the try body).
        required_paddle = "3.1.0"
        required_ocr = "3.2.0"
        try:
            # 1. Exact-version pins. Explicit raises instead of `assert`:
            #    asserts are stripped under `python -O`, silently disabling
            #    the check. Message text unchanged.
            if paddle.__version__ != required_paddle:
                raise RuntimeError(
                    f"需 PaddlePaddle {required_paddle}，当前：{paddle.__version__}")

            import paddleocr
            if paddleocr.__version__ != required_ocr:
                raise RuntimeError(
                    f"需 PaddleOCR {required_ocr}，当前：{paddleocr.__version__}")

            # 2. Dependency check. cv2/numpy are already imported at module
            #    top, so this effectively guards pdf2image.
            try:
                import pdf2image
                import cv2
                import numpy as np
            except ImportError as ie:
                missing_lib = str(ie).split("No module named ")[-1].strip("'")
                raise ImportError(f"安装缺失依赖：pip install {missing_lib} -i https://pypi.tuna.tsinghua.edu.cn/simple")

            # 3. Path checks: source PDF must exist; output dirs are created.
            if not os.path.exists(self.config.pdf_path):
                raise FileNotFoundError(f"PDF不存在：{self.config.pdf_path}\n请确认路径无中文引号/空格")

            os.makedirs(self.config.save_dir, exist_ok=True)
            os.makedirs(self.config.temp_image_dir, exist_ok=True)
            print("✅ 前置校验通过（版本+依赖+路径正常）")

        except Exception as e:
            # Re-raise everything as one RuntimeError carrying a ready-made
            # reinstall command (same behavior as the original).
            fix_cmd = (
                f"pip uninstall -y paddlepaddle paddleocr pdf2image opencv-python numpy && "
                f"pip install paddlepaddle=={required_paddle} "
                f"paddleocr=={required_ocr} pdf2image==1.17.0 opencv-python numpy "
                f"-i https://pypi.tuna.tsinghua.edu.cn/simple && "
                f"apt-get update && apt-get install -y poppler-utils"
            )
            raise RuntimeError(f"校验失败：{str(e)}\n修复命令：\n{fix_cmd}")

    def _init_device(self):
        """Pin Paddle inference to CPU (this pipeline never uses GPU)."""
        paddle.set_device("cpu")
        print(f"✅ CPU设备初始化完成（MKLDNN加速：{self.config.enable_mkldnn}）")

    def _init_ocr(self) -> PaddleOCR:
        """Build the PaddleOCR engine with loosened detection thresholds.

        NOTE(review): PaddleOCR 3.x documents these thresholds as
        `text_det_thresh` / `text_det_box_thresh` / `text_det_unclip_ratio`;
        the `det_db_*` spellings below are the 2.x names — confirm 3.2.0
        still accepts them, otherwise they may be rejected or ignored.

        Raises:
            RuntimeError: if engine construction fails, with a manual
                model-download command appended.
        """
        try:
            return PaddleOCR(
                lang=self.config.ocr_model,
                # Detection input-size limits (already raised in Config)
                text_det_limit_side_len=self.config.text_det_limit_side_len,
                text_det_limit_type=self.config.text_det_limit_type,
                use_textline_orientation=self.config.use_textline_orientation,
                enable_mkldnn=self.config.enable_mkldnn,
                cpu_threads=self.config.cpu_threads,
                device="cpu",
                # Loosened DB thresholds: keep faint boxes, expand tight ones
                det_db_thresh=self.config.text_det_db_thresh,
                det_db_box_thresh=self.config.text_det_db_box_thresh,
                det_db_unclip_ratio=self.config.text_det_db_unclip_ratio
            )
        except Exception as e:
            model_cmd = (
                "wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar -P ~/.paddlex/official_models/ && "
                "wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar -P ~/.paddlex/official_models/ && "
                "tar -xvf ~/.paddlex/official_models/ch_PP-OCRv3_det_infer.tar -C ~/.paddlex/official_models/ && "
                "tar -xvf ~/.paddlex/official_models/ch_PP-OCRv3_rec_infer.tar -C ~/.paddlex/official_models/"
            )
            raise RuntimeError(f"OCR初始化失败：{str(e)}\n手动下载模型：\n{model_cmd}")

    def pdf_to_images(self) -> List[str]:
        """Render each PDF page to a jpg, then enhance it for OCR.

        Enhancement = grayscale -> Gaussian denoise -> adaptive threshold
        (inverted binary), overwriting the rendered jpg in place.

        Returns:
            List of per-page image paths (page order preserved).

        Raises:
            RuntimeError: on conversion failure, with a poppler install
                hint when poppler is the cause.
        """
        print(f"\n[1/3] 转换PDF为图片（DPI={self.config.dpi}）+ 图像增强...")
        try:
            poppler_path = "/usr/bin/"  # Debian/Ubuntu poppler-utils location
            pages = convert_from_path(
                pdf_path=self.config.pdf_path,
                dpi=self.config.dpi,  # 300 DPI: sharper renders for blurry text
                output_folder=self.config.temp_image_dir,
                fmt="jpg",
                thread_count=self.config.cpu_threads,
                poppler_path=poppler_path
            )

            image_paths = []
            for page_idx, page in enumerate(pages, 1):
                img_path = os.path.join(self.config.temp_image_dir, f"page_{page_idx}.jpg")
                # High JPEG quality to limit compression artifacts
                page.save(img_path, "JPEG", quality=95)

                # Enhancement pass (denoise + contrast) for blurry text.
                # FIX: cv2.imread returns None on an unreadable file; the
                # original would then crash in cvtColor. Fall back to the
                # un-enhanced jpg so OCR can still try this page.
                img = cv2.imread(img_path)
                if img is not None:
                    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    denoised = cv2.GaussianBlur(gray, (3, 3), 0)
                    # Adaptive threshold boosts text/background contrast
                    # (suits blurry, low-resolution scans)
                    enhanced = cv2.adaptiveThreshold(
                        denoised, 255,
                        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                        cv2.THRESH_BINARY_INV,
                        11, 2  # empirical: window 11, offset 2
                    )
                    cv2.imwrite(img_path, enhanced)  # overwrite in place

                image_paths.append(img_path)

            print(f"✅ 转换+增强完成：共 {len(image_paths)} 页")
            return image_paths
        except Exception as e:
            if "poppler" in str(e).lower():
                raise RuntimeError(f"安装poppler：apt-get install -y poppler-utils\n原错误：{str(e)}")
            raise RuntimeError(f"PDF转图片失败：{str(e)}")

    def ocr_image_batch(self, image_paths: List[str]) -> List[Dict[str, Any]]:
        """OCR each page image; keep text blocks with confidence >= 0.3.

        Per-page failures are logged and recorded as empty pages rather
        than aborting the batch.

        Returns:
            One dict per page: {"page_num", "text_blocks", "block_count"}.
        """
        print(f"[2/3] OCR识别（共 {len(image_paths)} 页）...")
        results = []
        for page_idx, img_path in enumerate(image_paths, 1):
            print(f"  处理第 {page_idx} 页...")
            try:
                raw_result = self.ocr.predict(img_path)
                if not raw_result or len(raw_result) == 0:
                    print(f"  ⚠️  第 {page_idx} 页无识别文本，跳过")
                    results.append({"page_num": page_idx, "text_blocks": [], "block_count": 0})
                    continue

                # predict() returns one result object per input image
                current_page = raw_result[0]
                rec_texts = current_page.get("rec_texts", [])
                rec_scores = current_page.get("rec_scores", [])

                page_text_blocks = []
                for text, score in zip(rec_texts, rec_scores):
                    text = text.strip()
                    confidence = round(score, 3)
                    # Threshold lowered 0.5 -> 0.3 to keep borderline text
                    if text and confidence >= 0.3:
                        # Surface low-confidence hits for manual review
                        if confidence < 0.5:
                            print(f"    ⚠️  低置信度文本：'{text}'（置信度：{confidence}）")
                        page_text_blocks.append({"text": text, "confidence": confidence, "page_num": page_idx})

                results.append({
                    "page_num": page_idx,
                    "text_blocks": page_text_blocks,
                    "block_count": len(page_text_blocks)
                })
                print(f"  ✅ 第 {page_idx} 页完成：{len(page_text_blocks)} 个文本块")

            except Exception as e:
                # Best-effort batch: a bad page becomes an empty record
                print(f"  ⚠️  第 {page_idx} 页识别警告：{str(e)}（跳过）")
                results.append({"page_num": page_idx, "text_blocks": [], "block_count": 0})

        total_blocks = sum(p["block_count"] for p in results)
        print(f"✅ OCR识别完成：{len(results)} 页，有效文本块 {total_blocks} 个")
        return results

    def text_clean_and_chunk(self, ocr_results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Merge broken lines, clean text, and split into overlapping chunks.

        Returns:
            One dict per page with "chunks" (list[str]) plus metadata.
        """
        print(f"[3/3] 文本清洗与分块...")
        structured_data = []
        for page in ocr_results:
            if page["block_count"] == 0:
                structured_data.append({
                    "pdf_path": self.config.pdf_path,
                    "page_num": page["page_num"],
                    "total_pages": len(ocr_results),
                    "chunks": [],
                    "chunk_count": 0
                })
                continue

            # Step 1: line-break merge — when a block ends in a connector
            # char, drop the connector and glue the next block on (fixes
            # hyphen-split words like "人工智-能").
            text_blocks = [block["text"] for block in page["text_blocks"]]
            merged_text = ""
            for block in text_blocks:
                if merged_text and merged_text[-1] in "-/（[《‘\"":  # common connectors
                    merged_text = merged_text[:-1] + block  # strip connector, join
                else:
                    merged_text += " " + block
            full_text = merged_text.strip()

            # Step 2: cleaning. NOTE(review): ASCII space is absent from the
            # allowed class below, so ALL spaces (including between English
            # words) are removed — confirm that is intended for this corpus.
            full_text = re.sub(r'\s+', ' ', full_text)
            full_text = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9.,!?;:\'-，。！？；：（）()\[\]《》‘’""]', '', full_text)

            # Step 3: overlapping chunks, preferring to cut at sentence ends
            chunks = []
            start_idx = 0
            text_len = len(full_text)
            while start_idx < text_len:
                end_idx = start_idx + self.config.chunk_size
                if end_idx < text_len:
                    # Walk back up to 60 chars looking for sentence punctuation
                    for back_idx in range(end_idx, max(start_idx, end_idx - 60), -1):
                        if full_text[back_idx] in '.!?;。！？；\n':
                            end_idx = back_idx + 1
                            break
                current_chunk = full_text[start_idx:end_idx].strip()
                if len(current_chunk) >= self.config.min_chunk_len:
                    chunks.append(current_chunk)
                # FIX: force forward progress so a misconfigured overlap
                # (chunk_overlap >= chunk_size) cannot loop forever; at the
                # shipped defaults this is identical to end_idx - overlap.
                start_idx = max(end_idx - self.config.chunk_overlap, start_idx + 1)

            structured_data.append({
                "pdf_path": self.config.pdf_path,
                "page_num": page["page_num"],
                "total_pages": len(ocr_results),
                "raw_text_len": len(full_text),
                "chunks": chunks,
                "chunk_count": len(chunks),
                "process_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })

        total_chunks = sum(p["chunk_count"] for p in structured_data)
        print(f"✅ 文本处理完成：共生成 {total_chunks} 个文本块")
        return structured_data

    def save_results(self, structured_data: List[Dict[str, Any]]):
        """Write a human-readable .txt report and a structured .json dump."""
        # Sanitize the PDF basename for use in output filenames
        pdf_base_name = re.sub(r'[^\w-]', '', os.path.basename(self.config.pdf_path).replace(".pdf", ""))
        time_stamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        txt_path = os.path.join(self.config.save_dir, f"{pdf_base_name}_ocr_text_{time_stamp}.txt")
        with open(txt_path, "w", encoding="utf-8") as f:
            f.write("===== PDF OCR 提取结果 =====\n")
            f.write(f"PDF源路径：{self.config.pdf_path}\n")
            f.write(f"处理时间：{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"总页数：{len(structured_data)}\n")
            f.write(f"总文本块数：{sum(p['chunk_count'] for p in structured_data)}\n")
            f.write("=" * 80 + "\n\n")

            for page_data in structured_data:
                f.write(f"【第 {page_data['page_num']}/{page_data['total_pages']} 页】\n")
                if page_data["chunk_count"] == 0:
                    f.write("⚠️  本页无识别文本\n\n")
                    continue
                f.write(f"原文本长度：{page_data['raw_text_len']} 字符\n")
                f.write(f"文本块数：{page_data['chunk_count']} 个\n")
                f.write("-" * 60 + "\n")
                for chunk_idx, chunk in enumerate(page_data["chunks"], 1):
                    f.write(f"块{chunk_idx}：{chunk}\n\n")

        json_path = os.path.join(self.config.save_dir, f"{pdf_base_name}_ocr_struct_{time_stamp}.json")
        with open(json_path, "w", encoding="utf-8") as f:
            json.dump(structured_data, f, ensure_ascii=False, indent=2)

        print(f"\n✅ 结果保存完成：")
        print(f"  - 可读文本：{txt_path}")
        print(f"  - 结构化数据：{json_path}")

    def clean_temp_files(self, image_paths: List[str]):
        """Best-effort removal of per-page jpgs and (if empty) the temp dir.

        NOTE(review): pdf2image also writes its own intermediate files into
        temp_image_dir, so the rmdir below may be skipped — harmless.
        """
        try:
            for img_path in image_paths:
                if os.path.exists(img_path):
                    os.remove(img_path)
            if os.path.exists(self.config.temp_image_dir) and not os.listdir(self.config.temp_image_dir):
                os.rmdir(self.config.temp_image_dir)
            print(f"✅ 临时文件清理完成（删除 {len(image_paths)} 张图片）")
        except Exception as e:
            # Cleanup must never abort the pipeline — warn and move on
            print(f"⚠️  临时文件清理警告：{str(e)}（手动删除：{self.config.temp_image_dir}）")

    def run(self):
        """Execute the full pipeline; exits the process with status 1 on failure."""
        start_total_time = time.time()
        temp_image_paths = []
        print("=" * 80)
        print("🎉 PDF OCR 提取流程启动（PaddleOCR 3.2.0 减少漏字版）")
        print(f"📄 目标PDF：{self.config.pdf_path}")
        print(f"⏰ 启动时间：{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        print("=" * 80)

        try:
            temp_image_paths = self.pdf_to_images()
            ocr_results = self.ocr_image_batch(temp_image_paths)
            structured_data = self.text_clean_and_chunk(ocr_results)
            self.save_results(structured_data)
            self.clean_temp_files(temp_image_paths)

            total_cost = round(time.time() - start_total_time, 2)
            total_chunks = sum(p["chunk_count"] for p in structured_data)
            print(f"\n" + "=" * 80)
            print(f"🎉 全部流程完成！")
            print(f"⏱️  总耗时：{total_cost} 秒")
            print(f"📊 统计：{len(structured_data)} 页PDF → {total_chunks} 个文本块")
            print(f"💾 结果目录：{self.config.save_dir}")
            print("=" * 80)

        except Exception as e:
            # Clean up whatever pages were rendered before the failure
            self.clean_temp_files(temp_image_paths)
            print(f"\n" + "=" * 80)
            print(f"💥 流程失败：{str(e)}")
            print("=" * 80)
            # FIX: builtin exit() is installed by the `site` module and is
            # not guaranteed; SystemExit(1) gives the same exit status.
            raise SystemExit(1)


# -------------------------- 3. Script entry point --------------------------
if __name__ == "__main__":
    # CLI: both flags fall back to the defaults declared on Config
    arg_parser = argparse.ArgumentParser(description="PDF OCR 提取工具（PaddleOCR 3.2.0 减少漏字版）")
    arg_parser.add_argument("--pdf", type=str, default=Config.pdf_path, help="PDF路径")
    arg_parser.add_argument("--save-dir", type=str, default=Config.save_dir, help="结果目录")
    cli_args = arg_parser.parse_args()

    run_config = Config(pdf_path=cli_args.pdf, save_dir=cli_args.save_dir)
    PDF2TXTProcessor(run_config).run()