import os
import json
from typing import List, Dict, Any
import io
import sys

# Python 3.8 兼容性补丁：修复 zoneinfo 导入问题
if sys.version_info < (3, 9):
    try:
        import backports.zoneinfo
        # 将 backports.zoneinfo 注册为 zoneinfo
        sys.modules['zoneinfo'] = backports.zoneinfo
    except ImportError:
        pass  # 如果 backports.zoneinfo 未安装，忽略（会在导入 PaddleX 时报错）

# 尝试导入必要的库
PIL_AVAILABLE = False
PADDLE_AVAILABLE = False
PADDLEX_AVAILABLE = False
FITZ_AVAILABLE = False

try:
    from PIL import Image
    PIL_AVAILABLE = True
except ImportError:
    print("⚠ 警告: PIL库未安装，请运行: pip install Pillow")

try:
    import paddle
    PADDLE_AVAILABLE = True
except ImportError:
    print("⚠ 警告: PaddlePaddle未安装，请运行: pip install paddlepaddle")

try:
    import paddlex as pdx
    PADDLEX_AVAILABLE = True
except Exception:
    print("⚠ 警告: PaddleX未安装，请运行: pip install paddlex")
    # 尝试从本地 seal_env 目录加载 PaddleX；失败则回退到 seal_env_backup
    try:
        import sys
        project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
        local_paddlex_path = os.path.join(project_root, "seal_env", "PaddleX")
        if os.path.isdir(local_paddlex_path) and local_paddlex_path not in sys.path:
            sys.path.insert(0, local_paddlex_path)
        import paddlex as pdx  # retry
        PADDLEX_AVAILABLE = True
        print("✓ 已从本地 seal_env/PaddleX 加载 PaddleX")
    except Exception:
        # 再次尝试从备份目录加载
        try:
            backup_paddlex_path = os.path.join(project_root, "seal_env_backup", "PaddleX")
            if os.path.isdir(backup_paddlex_path) and backup_paddlex_path not in sys.path:
                sys.path.insert(0, backup_paddlex_path)
            import paddlex as pdx  # retry backup
            PADDLEX_AVAILABLE = True
            print("✓ 已从本地 seal_env_backup/PaddleX 加载 PaddleX")
        except Exception:
            PADDLEX_AVAILABLE = False

try:
    from paddleocr import PaddleOCR
    PADDLEOCR_AVAILABLE = True
except Exception:
    print("⚠ 警告: PaddleOCR未安装，请运行: pip install paddleocr")
    # 尝试从本地 seal_env 目录加载 PaddleOCR；失败则回退到 seal_env_backup
    try:
        import sys
        project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
        local_paddleocr_path = os.path.join(project_root, "seal_env", "PaddleOCR")
        if os.path.isdir(local_paddleocr_path) and local_paddleocr_path not in sys.path:
            sys.path.insert(0, local_paddleocr_path)
        from paddleocr import PaddleOCR  # retry
        PADDLEOCR_AVAILABLE = True
        print("✓ 已从本地 seal_env/PaddleOCR 加载 PaddleOCR")
    except Exception:
        try:
            backup_paddleocr_path = os.path.join(project_root, "seal_env_backup", "PaddleOCR")
            if os.path.isdir(backup_paddleocr_path) and backup_paddleocr_path not in sys.path:
                sys.path.insert(0, backup_paddleocr_path)
            from paddleocr import PaddleOCR  # retry backup
            PADDLEOCR_AVAILABLE = True
            print("✓ 已从本地 seal_env_backup/PaddleOCR 加载 PaddleOCR")
        except Exception:
            PADDLEOCR_AVAILABLE = False

try:
    import fitz  # PyMuPDF用于PDF处理
    FITZ_AVAILABLE = True
except ImportError:
    print("⚠ 警告: PyMuPDF未安装，请运行: pip install PyMuPDF")

class OCRItem:
    """A single OCR detection: recognized text plus its location and class info."""
    def __init__(self, text: str, coordinates: List[float], category: str = "",
                 category_id: int = 0, confidence: float = 0.0):
        self.confidence = confidence    # detection confidence score
        self.category_id = category_id  # numeric category id from the layout model
        self.category = category        # raw category name (e.g. "Title", "Table", "Seal")
        self.coordinates = coordinates  # bounding box in [x1, y1, x2, y2] order
        self.text = text                # recognized text content

class OCRResult:
    """Container for OCR recognition results, grouped by layout category.

    Each category attribute holds a list of OCRItem objects; ``full_layout``
    keeps the complete per-page layout description.
    """
    def __init__(self):
        # The three legacy buckets (stamp / handwritten / printed) followed by
        # the fine-grained layout categories, each starting as an empty list.
        categories = (
            "stamp",           # seals / stamps
            "handwritten",     # handwriting
            "printed",         # printed text
            "title",           # titles
            "table",           # tables
            "figure",          # figures
            "figure_caption",  # figure captions
            "table_caption",   # table captions
            "header",          # page headers
            "footer",          # page footers
            "equation",        # equations
        )
        for name in categories:
            setattr(self, name, [])
        # Complete per-page layout information
        self.full_layout: Dict[str, Any] = {}

class PP_DocLayer_Model:
    """Wrapper class for the PP-DocLayout-L layout-detection model.

    Loads the model through PaddleX, runs layout detection on images or PDF
    pages, and uses PaddleOCR (when available) to recognize the text inside
    each detected region.
    """
    def __init__(self, model_path: str):
        # Normalize the model path (handles relative and absolute paths)
        if not os.path.isabs(model_path):
            # Relative path: resolve against the current working directory
            # (the service is normally run from the paddleocr directory)
            model_path = os.path.abspath(model_path)
        else:
            model_path = os.path.abspath(model_path)
        
        # Auto-detect the inference model path: if a best_model directory was
        # passed, look for an "inference" subdirectory containing inference.yml
        inference_path = os.path.join(model_path, "inference")
        if os.path.isdir(inference_path) and os.path.exists(os.path.join(inference_path, "inference.yml")):
            model_path = inference_path
            print(f"✓ 检测到推理模型目录: {model_path}")
        elif os.path.exists(os.path.join(model_path, "inference.yml")):
            # The supplied path already is an inference directory
            print(f"✓ 使用推理模型目录: {model_path}")
        else:
            # Fall back to checking that the path exists at all
            if not os.path.exists(model_path):
                raise FileNotFoundError(f"模型路径不存在: {model_path}")
        
        # Try to load the model (PaddleX 3.x exposes create_model / create_predictor)
        try:
            # Prefer PaddleX's create_model interface when present
            if hasattr(pdx, "create_model"):
                self.model = pdx.create_model(model_name="PP-DocLayout-L", model_dir=model_path)
            else:
                from paddlex import inference as pdx_infer  # type: ignore
                self.model = pdx_infer.create_predictor(model_name="PP-DocLayout-L", model_dir=model_path)
            print(f"✓ 成功加载模型: {model_path}")
        except Exception as e:
            print(f"✗ 模型加载失败: {e}")
            print(f"  尝试的模型路径: {model_path}")
            # Give a more actionable hint for the common path mistake
            if "inference.yml" in str(e):
                print(f"  提示: 请确保模型路径指向包含 inference.yml 的目录")
                print(f"  如果模型在 best_model 目录，应该使用 best_model/inference")
            raise
        
        # Category id -> name mapping (matches the labels used during training)
        self.category_map = {
            0: "_background_",
            1: "Text",
            2: "Title", 
            3: "Figure",
            4: "Figure caption",
            5: "Table",
            6: "Table caption",
            7: "Header",
            8: "Footer",
            9: "Seal",        # seal / stamp
            10: "Equation",
            11: "Handwriting"  # handwriting
        }
        
        # Detailed mapping (keeps every fine-grained category)
        self.detailed_category_map = {
            9: "stamp",           # Seal -> stamp
            11: "handwritten",    # Handwriting -> handwritten
            1: "printed",         # Text -> printed
            2: "title",           # Title -> title
            3: "figure",          # Figure -> figure
            4: "figure_caption",  # Figure caption -> figure_caption
            5: "table",           # Table -> table
            6: "table_caption",   # Table caption -> table_caption
            7: "header",          # Header -> header
            8: "footer",          # Footer -> footer
            10: "equation"        # Equation -> equation
        }
        
        # Simplified mapping onto the three main categories (backward compat)
        self.simple_category_map = {
            9: "stamp",
            11: "handwritten",
            1: "printed",
            2: "printed",      # Title -> printed (compat)
            3: "printed",      # Figure -> printed (compat)
            4: "printed",      # Figure caption -> printed (compat)
            5: "printed",      # Table -> printed (compat)
            6: "printed",      # Table caption -> printed (compat)
            7: "printed",      # Header -> printed (compat)
            8: "printed",      # Footer -> printed (compat)
            10: "printed"      # Equation -> printed (compat)
        }
        
        # Per-category detection confidence thresholds (tunable per category)
        self.confidence_thresholds = {
            "stamp": 0.3,
            "handwritten": 0.3,
            "printed": 0.3,
            "title": 0.4,
            "table": 0.4,
            "figure": 0.3,
            "figure_caption": 0.3,
            "table_caption": 0.3,
            "header": 0.3,
            "footer": 0.3,
            "equation": 0.3
        }
        
        # Initialize PaddleOCR for text recognition (mirrors the reference pipeline code)
        self.ocr_reader = None
        if PADDLEOCR_AVAILABLE:
            try:
                # Reference code: PaddleOCR(use_angle_cls=True, lang="ch", use_gpu=False)
                # Some paddleocr versions do not accept the show_log kwarg,
                # so try both constructor signatures.
                try:
                    # First try without show_log
                    self.ocr_reader = PaddleOCR(use_angle_cls=True, lang='ch')
                    print("✓ PaddleOCR 文本识别器初始化成功")
                except Exception as e1:
                    # On failure, retry with the show_log=False signature
                    try:
                        self.ocr_reader = PaddleOCR(use_angle_cls=True, lang='ch', show_log=False)
                        print("✓ PaddleOCR 文本识别器初始化成功（带 show_log=False）")
                    except Exception as e2:
                        print(f"⚠ PaddleOCR 初始化失败: {e1}, {e2}")
                        self.ocr_reader = None
            except Exception as e:
                print(f"⚠ PaddleOCR 初始化失败: {e}")
                import traceback
                traceback.print_exc()
                self.ocr_reader = None

    def process_pdf(self, pdf_bytes: bytes):
        """Render every page of a PDF (raw bytes) into a list of PIL images.

        Raises:
            ImportError: if PyMuPDF is not available.
        """
        if not FITZ_AVAILABLE:
            raise ImportError("PyMuPDF未安装，无法处理PDF")
        
        pdf_doc = fitz.open("pdf", pdf_bytes)
        images = []
        
        for page_num in range(pdf_doc.page_count):
            page = pdf_doc[page_num]
            pix = page.get_pixmap()
            img_data = pix.tobytes("png")
            img = Image.open(io.BytesIO(img_data))
            images.append(img)
        
        pdf_doc.close()
        return images

    def normalize_bbox(self, bbox, img_width: int, img_height: int):
        """Normalize a bbox to integer [x1, y1, x2, y2], supporting several input formats.

        Returns None when the bbox is malformed or degenerate after clamping.
        """
        if len(bbox) < 4:
            return None
        
        # Handle the different possible bbox layouts
        if len(bbox) == 4:
            # Could be [x1, y1, x2, y2] or [x, y, width, height]
            x1, y1, x2_or_w, y2_or_h = bbox
            x1, y1 = float(x1), float(y1)
            x2_or_w, y2_or_h = float(x2_or_w), float(y2_or_h)
            
            # Heuristic: values larger than the image suggest width/height form
            if x2_or_w > img_width or y2_or_h > img_height:
                # Interpreted as [x, y, width, height]
                x2 = x1 + x2_or_w
                y2 = y1 + y2_or_h
            else:
                # Interpreted as [x1, y1, x2, y2]
                x2 = x2_or_w
                y2 = y2_or_h
        else:
            return None
        
        # Clamp coordinates to the image bounds
        x1 = max(0, min(x1, img_width))
        y1 = max(0, min(y1, img_height))
        x2 = max(0, min(x2, img_width))
        y2 = max(0, min(y2, img_height))
        
        # Reject boxes that collapsed to zero/negative size
        if x2 <= x1 or y2 <= y1:
            return None
        
        return [int(x1), int(y1), int(x2), int(y2)]
    
    def extract_text_from_region(self, img, bbox, category: str = "printed"):
        """Extract text from a region of the image (mirrors the PaddleX pipeline mechanism).
        
        Args:
            img: PIL Image object or numpy array
            bbox: bounding box as [x1, y1, x2, y2] or [x, y, width, height]
            category: category name, used to tune the OCR strategy
        """
        if not self.ocr_reader:
            return "文本识别不可用"
        
        try:
            import numpy as np
            
            # Convert a PIL Image to a numpy array (BGR order, as cv2/PaddleOCR expect)
            if PIL_AVAILABLE and isinstance(img, Image.Image):
                img_width, img_height = img.size
                # Convert to a numpy array (RGB)
                img_array = np.array(img)
                # PIL produces RGB; PaddleOCR expects BGR, so swap channels
                if len(img_array.shape) == 3 and img_array.shape[2] == 3:
                    # RGB -> BGR
                    img_array = img_array[:, :, ::-1]
            else:
                # Already a numpy array (or array-like)
                img_array = np.array(img)
                img_height, img_width = img_array.shape[:2]
            
            # Normalize the bbox coordinates
            normalized_bbox = self.normalize_bbox(bbox, img_width, img_height)
            if not normalized_bbox:
                return ""
            
            x1, y1, x2, y2 = normalized_bbox
            
            # Ensure integer coordinates (cv2-style cropping needs ints)
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            
            # cv2-style crop (mirrors the reference pipeline code)
            # crop_image = input_image[y1:y2, x1:x2]  # BGR numpy array
            crop_image = img_array[y1:y2, x1:x2]
            
            # Check the cropped region is non-empty
            if crop_image.size == 0 or crop_image.shape[0] == 0 or crop_image.shape[1] == 0:
                print(f"  警告: 裁剪区域无效 (x1={x1}, y1={y1}, x2={x2}, y2={y2})")
                return ""
            
            # Tune the text-confidence threshold per category
            if category == "table":
                text_confidence_threshold = 0.4
            elif category == "stamp":
                text_confidence_threshold = 0.3  # stamp text is hard to read; lower the bar
            else:
                text_confidence_threshold = 0.5
            
            # Run PaddleOCR on the crop (mirrors the reference pipeline code):
            # PaddleOCR.ocr(crop_image) takes a BGR numpy array directly.
            # Note: newer PaddleOCR versions may not accept the cls kwarg.
            try:
                result = self.ocr_reader.ocr(crop_image, cls=True)
            except TypeError:
                # cls kwarg unsupported: retry without it
                try:
                    result = self.ocr_reader.ocr(crop_image)
                except Exception as e:
                    print(f"    OCR 调用失败: {e}")
                    return ""
            
            # Extract the recognized text (mirrors the reference pipeline code).
            # New PaddleOCR format: result = [dict-like with 'rec_texts' and 'rec_scores']
            recognized_text = ""
            if result and len(result) > 0:
                ocr_result = result[0]
                
                # New-style result (carries rec_texts and rec_scores)?
                if isinstance(ocr_result, dict) and 'rec_texts' in ocr_result:
                    # New format: read rec_texts with their rec_scores
                    rec_texts = ocr_result.get('rec_texts', [])
                    rec_scores = ocr_result.get('rec_scores', [])
                    
                    texts = []
                    for i, text in enumerate(rec_texts):
                        if text:  # skip empty strings
                            # Matching confidence, defaulting to 1.0 when missing
                            confidence = rec_scores[i] if i < len(rec_scores) else 1.0
                            if confidence > text_confidence_threshold:
                                texts.append(text)
                    
                    recognized_text = " ".join(texts)
                    
                elif isinstance(ocr_result, (list, tuple)) and len(ocr_result) > 0:
                    # Old format: result[0] is a list of [[coords], (text, confidence)]
                    texts = []
                    for line in ocr_result:
                        if line and len(line) >= 2:
                            text_info = line[1]
                            if isinstance(text_info, (list, tuple)) and len(text_info) >= 2:
                                text = text_info[0]  # text content
                                confidence = text_info[1]  # confidence score
                                if confidence > text_confidence_threshold:
                                    texts.append(text)
                    recognized_text = " ".join(texts)
            
            if recognized_text:
                print(f"    OCR识别成功: {recognized_text}")
            else:
                print(f"    OCR识别为空 (类别={category}, 阈值={text_confidence_threshold})")
            
            return recognized_text
                
        except Exception as e:
            print(f"⚠ 文本识别失败 (类别={category}): {e}")
            import traceback
            traceback.print_exc()
            return ""

    def recognize(self, file_bytes: bytes) -> OCRResult:
        """Run layout detection + OCR on a file (image or PDF) and return an OCRResult."""
        result = OCRResult()
        
        # Determine the file type and load the page images
        try:
            # First try to open the bytes as an image
            if PIL_AVAILABLE:
                img = Image.open(io.BytesIO(file_bytes))
                if img.mode != 'RGB':
                    img = img.convert('RGB')
                images = [img]
            else:
                raise ImportError("PIL未安装，无法处理图片")
        except:
            # Fall back to treating the bytes as a PDF
            if not FITZ_AVAILABLE:
                raise ImportError("PyMuPDF未安装，无法处理PDF")
            images = self.process_pdf(file_bytes)
        
        # Process the pages one by one
        full_layout = []
        for page_idx, img in enumerate(images):
            print(f"正在处理第 {page_idx + 1} 页...")
            
            # Model prediction
            try:
                # PaddleX 3.x inference returns an iterable generator; materialize it
                import numpy as np
                np_img = np.array(img)
                
                try:
                    # Try the predict() method first
                    preds = self.model.predict(np_img, batch_size=1, layout_nms=True)
                except (TypeError, AttributeError):
                    # Fall back to the callable-predictor form
                    preds = self.model(image=np_img)
                
                # Unwrap the formats PaddleX may return:
                # either {'res': {'boxes': [...]}} or {'boxes': [...]}
                if isinstance(preds, dict):
                    # First unwrap a 'res' field if present
                    if 'res' in preds:
                        preds = preds['res']
                    # Then unwrap a 'boxes' field if present
                    if isinstance(preds, dict) and 'boxes' in preds:
                        preds = preds['boxes']
                    elif isinstance(preds, dict) and 'boxes' not in preds:
                        # A dict without 'boxes' is probably a single prediction;
                        # wrap it so the loop below can iterate over it
                        preds = [preds]
                
                # Make sure preds is list-like (materialize generators)
                if hasattr(preds, '__iter__') and not isinstance(preds, (list, tuple)):
                    preds = list(preds)
                
                # Special case: a list whose first element is a dict holding
                # 'boxes', e.g. [{'boxes': [...]}] — extract the inner boxes
                if preds and len(preds) > 0 and isinstance(preds[0], dict) and 'boxes' in preds[0]:
                    # Pull the boxes out of the first element
                    preds = preds[0]['boxes']
                    print(f"  从嵌套结构中提取 boxes: {len(preds)} 个目标")
                
                # Debug output: show what the prediction format looks like
                if preds and len(preds) > 0:
                    first_pred = preds[0]
                    if isinstance(first_pred, dict):
                        # Only print the key fields; avoid dumping whole arrays
                        preview = {k: v for k, v in first_pred.items() if k not in ['input_img']}
                        print(f"  预测结果示例（第一个）: {preview}")
                    else:
                        print(f"  预测结果示例（第一个）: {type(first_pred)}")
                
                print(f"✓ 模型预测完成，检测到 {len(preds)} 个目标")
            except Exception as e:
                print(f"✗ 模型预测失败: {e}")
                import traceback
                traceback.print_exc()
                continue
            
            # Collect the per-page results into one layout record
            page_layout = {
                "page": page_idx + 1,
                "stamp": [],
                "handwritten": [],
                "printed": [],
                "title": [],
                "table": [],
                "figure": [],
                "figure_caption": [],
                "table_caption": [],
                "header": [],
                "footer": [],
                "equation": []
            }
            
            img_width, img_height = img.size
            
            for pred in preds:
                # Parse a single PaddleX prediction; several return formats
                # are supported (dict, object attributes, positional tuple)
                category_id = 0
                bbox = [0, 0, 0, 0]
                confidence = 0.0
                
                if isinstance(pred, dict):
                    # Dict format: accept several field spellings.
                    # Category id: category_id, cls_id, category
                    category_id = pred.get("category_id") or pred.get("cls_id") or pred.get("category", 0)
                    # Coordinates: bbox, coordinate, coords
                    bbox = pred.get("bbox") or pred.get("coordinate") or pred.get("coords", [0, 0, 0, 0])
                    # Confidence: confidence, score
                    confidence = pred.get("confidence") or pred.get("score", 0.0)
                elif hasattr(pred, 'category_id') or hasattr(pred, 'cls_id'):
                    # Object/attribute format
                    category_id = getattr(pred, 'category_id', None) or getattr(pred, 'cls_id', 0)
                    bbox = getattr(pred, 'bbox', None) or getattr(pred, 'coordinate', None) or [0, 0, 0, 0]
                    confidence = getattr(pred, 'confidence', None) or getattr(pred, 'score', 0.0)
                elif isinstance(pred, (list, tuple)) and len(pred) >= 3:
                    # Possibly a (category_id, bbox, confidence) tuple
                    category_id = pred[0]
                    bbox = pred[1] if len(pred) > 1 else [0, 0, 0, 0]
                    confidence = pred[2] if len(pred) > 2 else 0.0
                else:
                    # Unknown prediction format: report and skip
                    print(f"  未知的预测结果格式: {type(pred)} - {pred}")
                    continue
                
                # Skip the background class
                if category_id == 0:
                    print(f"  跳过背景类别: {pred}")
                    continue
                
                # Resolve category names at all three granularities
                category_name = self.category_map.get(category_id, "Unknown")
                detailed_category = self.detailed_category_map.get(category_id, "printed")
                simple_category = self.simple_category_map.get(category_id, "printed")
                
                # Debug: warn about ids outside the known mapping
                if category_id not in self.category_map:
                    print(f"  警告: 未知类别ID {category_id}")
                
                # Normalize the bbox coordinates
                normalized_bbox = self.normalize_bbox(bbox, img_width, img_height)
                if not normalized_bbox:
                    print(f"  跳过无效bbox: {bbox}")
                    continue
                
                # Confidence threshold for this detailed category
                threshold = self.confidence_thresholds.get(detailed_category, 0.3)
                
                # Debug: report each detection before filtering
                print(f"  检测到目标: 类别={category_id}({category_name}->{detailed_category}), "
                      f"置信度={confidence:.3f}, 坐标={normalized_bbox}")
                
                # Apply the per-category confidence threshold
                if confidence < threshold:
                    print(f"    跳过低置信度目标: {confidence:.3f} < {threshold}")
                    continue
                
                # Run OCR inside the region (category tunes the OCR strategy);
                # fall back to a placeholder label when no text is recognized
                text_content = self.extract_text_from_region(img, normalized_bbox, detailed_category)
                if not text_content:
                    text_content = f"{detailed_category}_{category_id}"
                
                # Build the OCR item with the full detection metadata
                item = OCRItem(
                    text=text_content,
                    coordinates=normalized_bbox,
                    category=category_name,
                    category_id=category_id,
                    confidence=confidence
                )
                
                # Route the item into its detailed-category bucket
                if detailed_category == "stamp":
                    result.stamp.append(item)
                    page_layout["stamp"].append({"text": item.text, "coordinates": item.coordinates, 
                                                 "category": item.category, "confidence": item.confidence})
                elif detailed_category == "handwritten":
                    result.handwritten.append(item)
                    page_layout["handwritten"].append({"text": item.text, "coordinates": item.coordinates,
                                                        "category": item.category, "confidence": item.confidence})
                elif detailed_category == "title":
                    result.title.append(item)
                    page_layout["title"].append({"text": item.text, "coordinates": item.coordinates,
                                                 "category": item.category, "confidence": item.confidence})
                elif detailed_category == "table":
                    result.table.append(item)
                    page_layout["table"].append({"text": item.text, "coordinates": item.coordinates,
                                                  "category": item.category, "confidence": item.confidence})
                elif detailed_category == "figure":
                    result.figure.append(item)
                    page_layout["figure"].append({"text": item.text, "coordinates": item.coordinates,
                                                   "category": item.category, "confidence": item.confidence})
                elif detailed_category == "figure_caption":
                    result.figure_caption.append(item)
                    page_layout["figure_caption"].append({"text": item.text, "coordinates": item.coordinates,
                                                           "category": item.category, "confidence": item.confidence})
                elif detailed_category == "table_caption":
                    result.table_caption.append(item)
                    page_layout["table_caption"].append({"text": item.text, "coordinates": item.coordinates,
                                                         "category": item.category, "confidence": item.confidence})
                elif detailed_category == "header":
                    result.header.append(item)
                    page_layout["header"].append({"text": item.text, "coordinates": item.coordinates,
                                                   "category": item.category, "confidence": item.confidence})
                elif detailed_category == "footer":
                    result.footer.append(item)
                    page_layout["footer"].append({"text": item.text, "coordinates": item.coordinates,
                                                   "category": item.category, "confidence": item.confidence})
                elif detailed_category == "equation":
                    result.equation.append(item)
                    page_layout["equation"].append({"text": item.text, "coordinates": item.coordinates,
                                                     "category": item.category, "confidence": item.confidence})
                else:
                    # Default bucket: printed (backward compatibility)
                    result.printed.append(item)
                    page_layout["printed"].append({"text": item.text, "coordinates": item.coordinates,
                                                    "category": item.category, "confidence": item.confidence})
            
            full_layout.append(page_layout)
            print(f"✓ 第 {page_idx + 1} 页处理完成: "
                  f"印章{len(page_layout['stamp'])} "
                  f"手写{len(page_layout['handwritten'])} "
                  f"标题{len(page_layout['title'])} "
                  f"表格{len(page_layout['table'])} "
                  f"图片{len(page_layout['figure'])} "
                  f"印刷{len(page_layout['printed'])}")
        
        # Attach the full per-page layout information to the result
        result.full_layout = full_layout
        return result

def mock_ocr_recognize(file_bytes: bytes) -> OCRResult:
    """Simulated OCR recognition, used when the real model is unavailable."""
    result = OCRResult()

    # Fabricated detection results (text content plus coordinates).
    # For "多个椭圆章.jpg" four stamps should be reported.
    result.stamp = [
        OCRItem(text, box)
        for text, box in (
            ("财务专用章", [100, 100, 200, 150]),
            ("公司印章", [300, 200, 400, 250]),
            ("合同专用章", [500, 100, 600, 150]),
            ("人事专用章", [700, 200, 800, 250]),
        )
    ]

    result.handwritten = [
        OCRItem(text, box)
        for text, box in (
            ("张三", [50, 300, 250, 350]),
            ("2024年1月15日", [100, 400, 300, 450]),
        )
    ]

    result.printed = [
        OCRItem(text, box)
        for text, box in (
            ("合同编号：HT2024001", [50, 500, 400, 550]),
            ("甲方：北京科技有限公司", [50, 600, 400, 650]),
        )
    ]

    def _as_dicts(items):
        # Serialize items down to their text/coordinates pairs.
        return [{"text": it.text, "coordinates": it.coordinates} for it in items]

    # Simulated full-layout information for a single page
    result.full_layout = [{
        "page": 1,
        "stamp": _as_dicts(result.stamp),
        "handwritten": _as_dicts(result.handwritten),
        "printed": _as_dicts(result.printed)
    }]

    print("✓ 使用模拟模式进行OCR识别")
    return result

# ==================== Model initialization ====================

# Initialize the model (path can be overridden via an environment variable)
default_model_path = "seal_env/PaddleX/output_last/best_model"
model_path = os.environ.get("DOCLAYER_MODEL_PATH", default_model_path)

# Normalize the path (resolve relative paths)
if not os.path.isabs(model_path):
    # Resolve relative paths against the paddleocr directory:
    # __file__ is src/service/ocr_service.py, so walk up to paddleocr
    current_file = os.path.abspath(__file__)  # src/service/ocr_service.py
    src_dir = os.path.dirname(current_file)  # src/service
    service_dir = os.path.dirname(src_dir)  # src
    paddleocr_dir = os.path.dirname(service_dir)  # paddleocr
    model_path = os.path.join(paddleocr_dir, model_path)
model_path = os.path.abspath(model_path)

# Check that the model exists (either best_model or best_model/inference)
model_exists = False
inference_path = os.path.join(model_path, "inference")
if os.path.isdir(model_path):
    # Prefer best_model/inference when it contains inference.yml
    if os.path.isdir(inference_path) and os.path.exists(os.path.join(inference_path, "inference.yml")):
        model_exists = True
        model_path = inference_path
    # Otherwise accept best_model itself if it contains inference.yml
    elif os.path.exists(os.path.join(model_path, "inference.yml")):
        model_exists = True

# your_ocr_model is the public entry point: a callable (bytes -> OCRResult)
# when the model loaded, or None when recognition is unavailable.
if not model_exists:
    print(f"⚠ 警告: 模型文件不存在: {model_path}")
    print(f"  尝试查找: {os.path.join(model_path, 'inference')}")
    print("OCR模型不可用，无法进行识别")
    your_ocr_model = None
else:
    if PADDLEX_AVAILABLE:
        try:
            ocr_model_instance = PP_DocLayer_Model(model_path)
            your_ocr_model = ocr_model_instance.recognize
            print(f"✓ OCR模型初始化成功: {model_path}")
        except Exception as e:
            print(f"✗ 模型加载失败: {e}")
            print("OCR模型不可用，无法进行识别")
            your_ocr_model = None
    else:
        print("⚠ 警告: PaddleX不可用，OCR模型不可用，无法进行识别")
        your_ocr_model = None

def compare_ocr_results(old: OCRResult, new: OCRResult) -> Dict[str, Any]:
    """Compare two OCR results and report text-level differences per category.

    Args:
        old: Baseline OCR result (e.g. from the original document).
        new: OCR result to compare against the baseline.

    Returns:
        Dict with "stamp_diff", "handwritten_diff" and "printed_diff" — each
        an {"added": [...], "removed": [...]} mapping of text strings — plus
        "is_tampered", which is True when any printed text was added or
        removed (printed-text changes are the tampering signal).
    """
    def _diff_texts(old_items: List[OCRItem], new_items: List[OCRItem]) -> Dict[str, List[str]]:
        # Compare by text content only. List membership (rather than sets)
        # preserves the original semantics: duplicate texts on the scanned
        # side are reported once per occurrence.
        old_texts = [item.text for item in old_items]
        new_texts = [item.text for item in new_items]
        return {
            "added": [t for t in new_texts if t not in old_texts],
            "removed": [t for t in old_texts if t not in new_texts],
        }

    stamp_diff = _diff_texts(old.stamp, new.stamp)
    handwritten_diff = _diff_texts(old.handwritten, new.handwritten)
    printed_diff = _diff_texts(old.printed, new.printed)

    return {
        "stamp_diff": stamp_diff,
        "handwritten_diff": handwritten_diff,
        "printed_diff": printed_diff,
        "is_tampered": len(printed_diff["added"]) > 0 or len(printed_diff["removed"]) > 0,
    }