import pytesseract
from PIL import Image
import cv2
import numpy as np
from langchain.schema import Document
import json


class MindMapProcessor:
    """Mind-map image processor.

    Detects the coarse graphical structure of a mind-map image (connector
    lines and node circles), OCRs its text both by region and by connected
    component, and packages everything into a LangChain ``Document``.
    """

    def __init__(self):
        # Tesseract: --psm 6 = assume a uniform block of text, --oem 3 = default engine.
        # NOTE(review): tessedit_char_whitelist enumerates individual characters;
        # the literal "中文汉字" therefore whitelists only those four CJK
        # characters, not Chinese text in general — confirm intent.
        self.ocr_config = '--psm 6 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789中文汉字（）()【】[]：:，,. '

    def _read_image(self, image_path):
        """Load *image_path* with OpenCV, failing fast on unreadable files.

        ``cv2.imread`` returns ``None`` (instead of raising) when a file is
        missing or not a decodable image, which previously surfaced later as
        an opaque ``AttributeError`` inside ``cv2.cvtColor``.

        Raises:
            FileNotFoundError: if the image cannot be read.
        """
        img = cv2.imread(image_path)
        if img is None:
            raise FileNotFoundError(f"Cannot read image file: {image_path}")
        return img

    def detect_mindmap_structure(self, image_path):
        """Detect coarse mind-map structure (connector lines, node circles).

        Returns:
            dict with ``lines_count``, ``circles_count`` and ``image_shape``.
        """
        img = self._read_image(image_path)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Edge map feeds both Hough transforms below.
        edges = cv2.Canny(gray, 50, 150)

        # Straight segments: the connectors between mind-map nodes.
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=50,
                                minLineLength=30, maxLineGap=10)

        # Circles/ellipses: candidate node bubbles.
        circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20,
                                   param1=50, param2=30, minRadius=10, maxRadius=100)

        # Both Hough calls return None when nothing is found.
        structure_info = {
            'lines_count': len(lines) if lines is not None else 0,
            'circles_count': len(circles[0]) if circles is not None else 0,
            'image_shape': img.shape,
        }

        return structure_info

    def extract_mindmap_by_regions(self, image_path):
        """OCR the image region by region (central area plus four quadrants).

        Returns:
            dict mapping region name -> non-empty OCR text for that region.
        """
        img = self._read_image(image_path)
        height, width = img.shape[:2]

        # Typical mind-map layout: central topic plus four quadrants.
        # NOTE(review): 'center' spans (w/4, h/4)-(w/2, h/2), i.e. the
        # upper-left quarter of the middle, not a centered box — possibly
        # intended to be (w/4, h/4)-(3w/4, 3h/4); confirm before changing.
        regions = {
            'center': (width // 4, height // 4, width // 2, height // 2),
            'top_left': (0, 0, width // 2, height // 2),
            'top_right': (width // 2, 0, width, height // 2),
            'bottom_left': (0, height // 2, width // 2, height),
            'bottom_right': (width // 2, height // 2, width, height)
        }

        results = {}

        for region_name, (x1, y1, x2, y2) in regions.items():
            # Crop the region of interest.
            region_img = img[y1:y2, x1:x2]

            # OpenCV stores BGR; PIL expects RGB.
            pil_region = Image.fromarray(cv2.cvtColor(region_img, cv2.COLOR_BGR2RGB))

            text = pytesseract.image_to_string(
                pil_region,
                lang='chi_sim+eng',
                config=self.ocr_config
            ).strip()

            # Only keep regions where OCR found something.
            if text:
                results[region_name] = text

        return results

    def extract_by_connected_components(self, image_path):
        """Locate text areas via connected components and OCR each one.

        Returns:
            list of dicts with ``text``, ``position`` (x, y, w, h) and
            ``center`` (cx, cy), sorted roughly in reading order
            (top-to-bottom, then left-to-right).
        """
        img = self._read_image(image_path)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Otsu binarization, inverted so text becomes foreground (white).
        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

        # Dilate so neighboring glyphs merge into one component per text run.
        kernel = np.ones((3, 3), np.uint8)
        dilated = cv2.dilate(binary, kernel, iterations=2)

        num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(dilated, connectivity=8)

        text_blocks = []

        # Label 0 is the background component — skip it.
        for i in range(1, num_labels):
            x, y, w, h, area = stats[i]

            # Drop tiny components (noise / stray marks).
            if area > 100 and w > 20 and h > 20:
                text_region = img[y:y + h, x:x + w]

                # OpenCV BGR -> PIL RGB, then OCR as a single text line.
                pil_region = Image.fromarray(cv2.cvtColor(text_region, cv2.COLOR_BGR2RGB))
                text = pytesseract.image_to_string(
                    pil_region,
                    lang='chi_sim+eng',
                    config='--psm 7'  # PSM 7: treat image as a single text line
                ).strip()

                if text:
                    text_blocks.append({
                        'text': text,
                        'position': (x, y, w, h),
                        'center': (x + w // 2, y + h // 2)
                    })

        # Approximate reading order: sort by vertical center, then horizontal.
        text_blocks.sort(key=lambda b: (b['center'][1], b['center'][0]))

        return text_blocks

    def process_mindmap_comprehensive(self, image_path, use_ai=False, api_key=None):
        """Run all extraction passes and bundle the result as a Document.

        Args:
            image_path: path to the mind-map image.
            use_ai: if True (and api_key given), attempt AI structure analysis.
            api_key: credential for the optional AI analysis step.

        Returns:
            langchain Document whose page_content is the combined text and
            whose metadata describes the extraction run.
        """
        print("开始处理思维导图...")

        # 1. Structure detection (lines / circles / image shape).
        structure_info = self.detect_mindmap_structure(image_path)
        print(f"检测到结构: {structure_info}")

        # 2. Region-based OCR.
        region_results = self.extract_mindmap_by_regions(image_path)
        print("分区识别结果:", region_results)

        # 3. Connected-component OCR.
        text_blocks = self.extract_by_connected_components(image_path)
        print(f"找到 {len(text_blocks)} 个文字块")

        # 4. Optional AI analysis. analyze_mindmap_with_ai is not defined in
        # this class; the unconditional call previously raised AttributeError
        # whenever use_ai was requested. Degrade gracefully instead.
        ai_analysis = None
        if use_ai and api_key:
            print("使用 AI 分析思维导图结构...")
            analyzer = getattr(self, 'analyze_mindmap_with_ai', None)
            if analyzer is not None:
                ai_analysis = analyzer(image_path, api_key)
            else:
                print("AI analysis skipped: analyze_mindmap_with_ai is not implemented")

        # 5. Merge all passes into one text body.
        all_text = self._combine_results(region_results, text_blocks, ai_analysis)

        document = Document(
            page_content=all_text,
            metadata={
                "source": image_path,
                "content_type": "mindmap",
                "structure_info": structure_info,
                "text_blocks_count": len(text_blocks),
                "regions_found": list(region_results.keys()),
                "ai_analysis_used": use_ai
            }
        )

        return document

    def _combine_results(self, region_results, text_blocks, ai_analysis):
        """Merge region OCR, text-block OCR, and optional AI analysis.

        Args:
            region_results: dict of region name -> OCR text.
            text_blocks: list of dicts with 'text' and 'position' keys.
            ai_analysis: optional AI analysis string (appended when truthy).

        Returns:
            one formatted report string.
        """
        # Build parts and join once instead of quadratic += concatenation.
        parts = ["思维导图内容提取结果:\n\n"]

        parts.append("=== 分区识别 ===\n")
        for region, text in region_results.items():
            parts.append(f"{region}: {text}\n")

        parts.append("\n=== 详细文字块 ===\n")
        for i, block in enumerate(text_blocks, 1):
            parts.append(f"{i}. {block['text']} (位置: {block['position']})\n")

        if ai_analysis:
            parts.append(f"\n=== AI 结构分析 ===\n{ai_analysis}")

        return "".join(parts)

if __name__ == "__main__":
    # Guard the demo run so importing this module does not trigger OCR I/O.
    file_path = "../resource/feature_history2.jpg"
    result = MindMapProcessor().process_mindmap_comprehensive(file_path)
    print(result)