import os
import cv2
import json
import numpy as np
from paddleocr import PaddleOCR
import layoutparser as lp

class ImageOCRProcessor:
    """OCR and layout-analysis pipeline for document images.

    Combines PaddleOCR text recognition with a PubLayNet-trained
    Detectron2 layout model (via layoutparser) to group recognized text
    lines into layout blocks (text / title / list / table / figure) and
    serialize the result to JSON.
    """

    def __init__(self, lang='ch'):
        """Initialize the OCR engine and the layout-detection model.

        Args:
            lang: Language code passed to PaddleOCR (default 'ch').
        """
        # OCR engine; angle classification handles rotated text lines.
        self.ocr = PaddleOCR(use_angle_cls=True, lang=lang)

        # Layout-analysis model: Faster R-CNN trained on PubLayNet.
        # Detections below 0.5 confidence are discarded.
        self.layout_model = lp.Detectron2LayoutModel(
            'lp://PubLayNet/faster_rcnn_R_50_FPN_3x/config',
            extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.5],
            label_map={0: "Text", 1: "Title", 2: "List", 3: "Table", 4: "Figure"}
        )

    def process_image(self, image_path, output_dir=None):
        """Process an image file and save extracted text/layout as JSON.

        Args:
            image_path: Path to the input image.
            output_dir: Directory for the JSON output; defaults to the
                image's own directory.

        Returns:
            Path of the written ``<name>_result.json`` file.
        """
        if output_dir is None:
            output_dir = os.path.dirname(image_path)
        os.makedirs(output_dir, exist_ok=True)

        result = self.process_single_image(image_path)

        # splitext keeps interior dots intact: "scan.v2.png" -> "scan.v2"
        # (the old split('.')[0] would have truncated it to "scan").
        base_name = os.path.splitext(os.path.basename(image_path))[0]
        output_json = os.path.join(output_dir, f"{base_name}_result.json")
        with open(output_json, 'w', encoding='utf-8') as f:
            json.dump(result, f, ensure_ascii=False, indent=2)

        return output_json

    def process_single_image(self, img_path):
        """Run OCR + layout analysis on one image.

        Args:
            img_path: Path to the image file.

        Returns:
            Dict with the image basename and a ``blocks`` list; each
            block carries a type, pixel coordinates, and its content
            (OCR text items, or row lists for tables).

        Raises:
            ValueError: If the image cannot be read.
        """
        image = cv2.imread(img_path)
        if image is None:
            raise ValueError(f"无法读取图像: {img_path}")

        # Optional preprocessing hook (disabled by default).
        # image = self.preprocess_image(image)

        # Text recognition with angle classification.
        ocr_result = self.ocr.ocr(img_path, cls=True)

        # PaddleOCR returns [None] when no text is detected; normalize to
        # an empty list so the loops below never iterate over None.
        ocr_lines = ocr_result[0] if ocr_result and ocr_result[0] else []

        # Layout detection on the same image.
        layout_image = lp.load_image(img_path)
        layout = self.layout_model.detect(layout_image)

        result = {
            "image": os.path.basename(img_path),
            "blocks": []
        }

        # Assign OCR lines to the layout block whose rectangle contains them.
        for block in layout:
            block_coords = block.coordinates
            x1, y1, x2, y2 = (int(c) for c in block_coords[:4])

            block_texts = []
            for line in ocr_lines:
                bbox = line[0]
                text = line[1][0]
                confidence = line[1][1]

                # Top-left and bottom-right corners of the text box.
                text_x1, text_y1 = bbox[0][0], bbox[0][1]
                text_x2, text_y2 = bbox[2][0], bbox[2][1]

                # Keep the line if either corner falls inside the block
                # (tolerates boxes that slightly overhang the block edge).
                if (x1 <= text_x1 <= x2 and y1 <= text_y1 <= y2) or \
                   (x1 <= text_x2 <= x2 and y1 <= text_y2 <= y2):
                    block_texts.append({
                        "text": text,
                        "confidence": float(confidence),
                        "bbox": [[float(p[0]), float(p[1])] for p in bbox]
                    })

            if block.type == "Table":
                # Tables get a row/column reconstruction instead of a flat list.
                table_content = self.extract_table_structure(image[y1:y2, x1:x2], block_texts)
                result["blocks"].append({
                    "type": "table",
                    "coordinates": [x1, y1, x2, y2],
                    "content": table_content
                })
            else:
                result["blocks"].append({
                    "type": block.type.lower(),
                    "coordinates": [x1, y1, x2, y2],
                    "content": block_texts
                })

        # Fallback: no layout blocks found — emit all OCR text as one
        # full-page text block so results are never silently dropped.
        if not result["blocks"] and ocr_lines:
            all_texts = []
            for line in ocr_lines:
                text = line[1][0]
                confidence = line[1][1]
                bbox = line[0]
                all_texts.append({
                    "text": text,
                    "confidence": float(confidence),
                    "bbox": [[float(p[0]), float(p[1])] for p in bbox]
                })

            result["blocks"].append({
                "type": "text",
                "coordinates": [0, 0, image.shape[1], image.shape[0]],
                "content": all_texts
            })

        return result

    def preprocess_image(self, image):
        """Preprocess an image to improve OCR quality.

        Args:
            image: BGR image array as returned by ``cv2.imread``.

        Returns:
            A denoised, binarized grayscale image.
        """
        # Grayscale conversion.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Otsu binarization (threshold chosen automatically).
        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # Non-local-means denoising to suppress scan noise.
        denoised = cv2.fastNlMeansDenoising(binary, None, 10, 7, 21)

        return denoised

    def extract_table_structure(self, table_image, texts):
        """Group OCR text items into table rows by vertical position.

        Simple heuristic: items whose top-left y coordinates differ by
        less than 20 px belong to the same row; rows are ordered
        top-to-bottom and cells left-to-right.

        Args:
            table_image: Cropped table region (currently unused; kept
                for future structure-detection algorithms).
            texts: OCR items with a ``bbox`` (list of [x, y] points)
                and a ``text`` field. Not modified.

        Returns:
            List of rows, each a list of cell text strings.
        """
        if not texts:
            return []

        # Sort a copy by top-left y — the original in-place sort mutated
        # the caller's list as a side effect.
        ordered = sorted(texts, key=lambda x: x["bbox"][0][1])

        rows = []
        current_row = [ordered[0]]
        row_y = ordered[0]["bbox"][0][1]

        for text in ordered[1:]:
            text_y = text["bbox"][0][1]
            # Within 20 px vertically -> same row (threshold is tunable).
            if abs(text_y - row_y) < 20:
                current_row.append(text)
            else:
                # Close out the current row, ordered left-to-right.
                current_row.sort(key=lambda x: x["bbox"][0][0])
                rows.append([t["text"] for t in current_row])
                current_row = [text]
                row_y = text_y

        # Flush the final row.
        if current_row:
            current_row.sort(key=lambda x: x["bbox"][0][0])
            rows.append([t["text"] for t in current_row])

        return rows

    def visualize_result(self, image_path, result, output_path=None):
        """Draw layout blocks and text boxes onto a copy of the image.

        Args:
            image_path: Path to the original image.
            result: Result dict produced by ``process_single_image``.
            output_path: Where to save the annotated image; defaults to
                ``<name>_visualized.jpg`` next to the original.

        Returns:
            Path of the written visualization image.

        Raises:
            ValueError: If the image cannot be read.
        """
        image = cv2.imread(image_path)
        if image is None:
            raise ValueError(f"无法读取图像: {image_path}")

        for block in result["blocks"]:
            x1, y1, x2, y2 = block["coordinates"]

            # Color-code by block type (BGR): red=table, blue=title, green=text.
            if block["type"] == "table":
                color = (0, 0, 255)
            elif block["type"] == "title":
                color = (255, 0, 0)
            else:
                color = (0, 255, 0)

            cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)

            # Type label just above the block.
            cv2.putText(image, block["type"], (x1, y1-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2)

            # Outline individual text boxes (table content is row strings,
            # not bbox dicts, so tables are skipped here).
            if block["type"] != "table":
                for text_item in block["content"]:
                    if "bbox" in text_item:
                        bbox = text_item["bbox"]
                        pts = np.array(bbox, np.int32).reshape((-1, 1, 2))
                        cv2.polylines(image, [pts], True, (255, 255, 0), 1)

        if output_path is None:
            base_dir = os.path.dirname(image_path)
            # splitext instead of split('.')[0]: preserves dots in the name.
            base_name = os.path.splitext(os.path.basename(image_path))[0]
            output_path = os.path.join(base_dir, f"{base_name}_visualized.jpg")

        cv2.imwrite(output_path, image)
        return output_path

if __name__ == "__main__":
    import argparse
    
    parser = argparse.ArgumentParser(description='Image OCR and Layout Analysis')
    parser.add_argument('image_path', help='Path to the image file')
    parser.add_argument('--output', '-o', help='Output directory', default=None)
    parser.add_argument('--lang', '-l', help='Language for OCR', default='ch')
    parser.add_argument('--visualize', '-v', action='store_true', help='Visualize results')
    
    args = parser.parse_args()
    
    processor = ImageOCRProcessor(lang=args.lang)
    result_path = processor.process_image(args.image_path, args.output)
    
    print(f"Processing complete. Results saved to: {result_path}")
    
    if args.visualize:
        # 读取结果
        with open(result_path, 'r', encoding='utf-8') as f:
            result = json.load(f)
        
        # 可视化
        vis_path = processor.visualize_result(args.image_path, result)
        print(f"Visualization saved to: {vis_path}") 