import cv2
import numpy as np
from paddleocr import PaddleOCR
import os
from difflib import SequenceMatcher
import re

class ImageDifferenceDetector:
    """
    Detect textual differences between two images.

    Pipeline: align the second image onto the first (ORB features + RANSAC
    homography), run PaddleOCR on both, match recognized text regions by
    bounding-box IoU and text similarity, then draw red rectangles around the
    differing character spans on a side-by-side composite image.
    """

    def __init__(self, lang='en'):
        """
        Create a detector with its own PaddleOCR engine.

        Args:
            lang: language code forwarded to PaddleOCR (default 'en').
        """
        self.ocr = self._initialize_ocr(lang)

    def _initialize_ocr(self, lang):
        """
        Initialize PaddleOCR, retrying without the 'lang' keyword if the
        installed PaddleOCR version rejects it.

        Returns:
            A configured PaddleOCR instance.

        Raises:
            Exception: whatever PaddleOCR raised, if both attempts fail.
        """
        try:
            ocr = PaddleOCR(use_doc_orientation_classify=False,
                            use_doc_unwarping=False,
                            use_textline_orientation=False,
                            lang=lang)
            return ocr
        except Exception as e:
            print(f"初始化 PaddleOCR 失败: {e}")
            print("请检查 PaddleOCR 版本和参数。如果 'lang' 报错，请尝试移除它并重新运行。")
            try:
                # Some PaddleOCR builds do not accept 'lang'; retry without it.
                print("尝试不带 'lang' 参数初始化 PaddleOCR...")
                ocr = PaddleOCR(use_doc_orientation_classify=False,
                                use_doc_unwarping=False,
                                use_textline_orientation=False)
                print("注意：如果 OCR 结果不准确，可能需要检查默认语言模型或在 get_text_from_image 中明确指定语言。")
                return ocr
            except Exception as e_no_lang:
                print(f"再次初始化 PaddleOCR 失败 (不带 'lang'): {e_no_lang}")
                print("请确保 PaddleOCR 正确安装，并且您的 Python 环境与所选的 PaddleOCR 版本兼容。")
                raise

    @staticmethod
    def get_dynamic_confidence_threshold(text_length):
        """
        Return an OCR confidence threshold scaled by text length:
        short strings get a lenient threshold, long strings a strict one.
        """
        if text_length <= 2:
            return 0.3  # very short text (1-2 characters)
        elif text_length <= 5:
            return 0.4  # short text (3-5 characters)
        elif text_length <= 10:
            return 0.5  # medium text (6-10 characters)
        else:
            return 0.6  # long text (more than 10 characters)

    def preprocess_image(self, image_path):
        """
        Read an image and produce a denoised binary version of it.

        Returns:
            (original BGR image, cleaned binary image).

        Raises:
            FileNotFoundError: if the image cannot be read.
        """
        img = cv2.imread(image_path)
        if img is None:
            raise FileNotFoundError(f"无法读取图片: {image_path}")

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Gaussian blur to suppress noise before thresholding.
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)

        # Adaptive threshold is more robust to uneven lighting than a global one.
        binary = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 11, 2)

        # Morphological closing removes small speckles.
        kernel = np.ones((2, 2), np.uint8)
        cleaned = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)

        return img, cleaned

    def preprocess_image_for_ocr(self, image_path):
        """
        Preprocess an image specifically for OCR (contrast enhancement +
        Otsu binarization) and write the result to a temporary sibling file.

        Returns:
            (original BGR image, grayscale image, path of the processed file).

        Raises:
            FileNotFoundError: if the image cannot be read.
        """
        img = cv2.imread(image_path)
        if img is None:
            raise FileNotFoundError(f"无法读取图片: {image_path}")

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # CLAHE boosts local contrast so faint text survives binarization.
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        enhanced = clahe.apply(gray)

        blurred = cv2.GaussianBlur(enhanced, (3, 3), 0)

        # Otsu picks the binarization threshold automatically.
        _, binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # BUGFIX: the previous str.replace('.png', ...) left processed_path equal
        # to image_path for any non-.png input, so the caller's cleanup would
        # delete the original source image. splitext works for any extension.
        root, ext = os.path.splitext(image_path)
        processed_path = f"{root}_processed{ext or '.png'}"
        cv2.imwrite(processed_path, binary)

        return img, gray, processed_path

    def get_text_from_image(self, image_path):
        """
        Detect and recognize all text in an image with PaddleOCR.

        Returns:
            A list of dicts: {'bbox': (x, y, w, h), 'text': str, 'score': float}.
            Empty list if OCR fails or the result format is unrecognized.
        """
        # OCR runs on a preprocessed copy written to disk.
        original_img, gray_img, processed_path = self.preprocess_image_for_ocr(image_path)

        try:
            result = self.ocr.predict(processed_path)
        except Exception as e:
            print(f"PaddleOCR 处理图像 '{image_path}' 失败: {e}")
            return []
        finally:
            # Always remove the temporary preprocessed file, on success or failure.
            if os.path.exists(processed_path):
                os.remove(processed_path)

        texts_found = []
        # Parse the dict-per-page format returned by PaddleOCR.predict().
        if result and isinstance(result, list) and len(result) > 0 and isinstance(result[0], dict):
            page_result = result[0]
            if 'rec_texts' in page_result and 'dt_polys' in page_result:
                rec_texts = page_result['rec_texts']
                dt_polys = page_result['dt_polys']
                # Some result versions omit per-line scores; assume full confidence.
                rec_scores = page_result.get('rec_scores', [1.0] * len(rec_texts))

                for i in range(len(rec_texts)):
                    text = rec_texts[i]
                    bbox_poly = dt_polys[i]
                    score = rec_scores[i]

                    # Convert the detection polygon to an axis-aligned (x, y, w, h) box.
                    x_coords = [point[0] for point in bbox_poly]
                    y_coords = [point[1] for point in bbox_poly]
                    x = int(min(x_coords))
                    y = int(min(y_coords))
                    w = int(max(x_coords) - x)
                    h = int(max(y_coords) - y)

                    texts_found.append({'bbox': (x, y, w, h), 'text': text, 'score': score})
            else:
                print(f"PaddleOCR.predict() 结果字典缺少 'rec_texts' 或 'dt_polys' 键。实际键: {page_result.keys()}")
        else:
            print(f"PaddleOCR.predict() 返回结果格式不识别。预期列表包含字典，实际: {type(result)}")

        return texts_found

    @staticmethod
    def normalize_text_for_comparison(text):
        """
        Normalize text for comparison: lowercase, keep only word characters,
        whitespace, '-', '.', '/', parentheses, and collapse whitespace runs.
        """
        text = str(text).lower()
        text = re.sub(r'[^\w\s\-\.\/\(\)]', '', text)
        text = re.sub(r'\s+', ' ', text).strip()
        return text

    @staticmethod
    def calculate_iou(boxA, boxB):
        """
        Intersection-over-union of two (x, y, w, h) boxes.

        Returns:
            IoU in [0.0, 1.0]; 0.0 for degenerate zero-area inputs (the
            previous version raised ZeroDivisionError in that case).
        """
        xA = max(boxA[0], boxB[0])
        yA = max(boxA[1], boxB[1])
        xB = min(boxA[0] + boxA[2], boxB[0] + boxB[2])
        yB = min(boxA[1] + boxA[3], boxB[1] + boxB[3])

        interArea = max(0, xB - xA) * max(0, yB - yA)
        boxAArea = boxA[2] * boxA[3]
        boxBArea = boxB[2] * boxB[3]
        union = float(boxAArea + boxBArea - interArea)
        # BUGFIX: guard against division by zero when both boxes are empty.
        if union <= 0:
            return 0.0
        return interArea / union

    @staticmethod
    def align_images(img1_path, img2_path):
        """
        Warp the second image onto the first using ORB features + RANSAC
        homography. Falls back to the unmodified second image whenever
        alignment is not possible.

        Raises:
            FileNotFoundError: if either image cannot be read.
        """
        img1 = cv2.imread(img1_path)
        img2 = cv2.imread(img2_path)
        # BUGFIX: fail explicitly on unreadable paths instead of crashing
        # inside cvtColor with a cryptic OpenCV error.
        if img1 is None or img2 is None:
            raise FileNotFoundError("无法读取输入图像")

        gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

        # ORB keypoints + descriptors on both images.
        orb = cv2.ORB_create(nfeatures=5000)
        kp1, des1 = orb.detectAndCompute(gray1, None)
        kp2, des2 = orb.detectAndCompute(gray2, None)

        if des1 is None or des2 is None or len(des1) < 4 or len(des2) < 4:
            print("警告: 无法提取足够的特征点进行对齐，使用原始图像")
            return img2

        # Cross-checked brute-force matching on Hamming distance (binary ORB descriptors).
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = bf.match(des1, des2)
        matches = sorted(matches, key=lambda x: x.distance)

        # Keep only the best matches to stabilize the homography estimate.
        good_matches = matches[:min(len(matches), 100)]

        src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

        try:
            H, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
            if H is None:
                print("警告: 无法计算单应性矩阵，使用原始图像")
                return img2

            h, w = img1.shape[:2]
            aligned_img2 = cv2.warpPerspective(img2, H, (w, h))
            return aligned_img2
        except Exception as e:
            print(f"警告: 图像对齐失败 ({e})，使用原始图像")
            return img2

    @staticmethod
    def highlight_detailed_differences(image_to_draw_on, text_info1, text_info2, w1_offset):
        """
        Draw tight red boxes around only the differing character spans of a
        matched text pair, estimating character x-positions from the region's
        average character width.

        Args:
            image_to_draw_on: composite image (numpy array) to annotate in place.
            text_info1: dict {'text': ..., 'bbox': ...} for the first image.
            text_info2: dict with the same keys for the second image.
            w1_offset: width of the first image; x-offset for boxes drawn on
                the right half of the composite.
        """
        text1 = text_info1['text']
        bbox1 = text_info1['bbox']
        text2 = text_info2['text']
        bbox2 = text_info2['bbox']

        # SequenceMatcher yields the character-level edit script between the texts.
        s = SequenceMatcher(None, text1, text2)

        # --- differences inside the first image ---
        if bbox1:
            x1, y1, w1, h1 = bbox1
            # Approximate per-character width (assumes roughly uniform glyph widths).
            char_width1 = w1 / len(text1) if len(text1) > 0 else 0

            # Opcodes tag spans as 'equal', 'delete', 'insert' or 'replace'.
            for tag, i1, i2, j1, j2 in s.get_opcodes():
                if tag == 'equal':
                    continue

                # 'delete'/'replace' spans belong to text1 and get highlighted.
                if (tag == 'delete' or tag == 'replace') and char_width1 > 0:
                    # Map character indices to pixel x-coordinates.
                    start_x = int(x1 + i1 * char_width1)
                    end_x = int(x1 + i2 * char_width1)
                    cv2.rectangle(image_to_draw_on, (start_x, y1), (end_x, y1 + h1), (0, 0, 255), 2)

        # --- differences inside the second image ---
        if bbox2:
            x2, y2, w2, h2 = bbox2
            char_width2 = w2 / len(text2) if len(text2) > 0 else 0

            for tag, i1, i2, j1, j2 in s.get_opcodes():
                if tag == 'equal':
                    continue

                # 'insert'/'replace' spans belong to text2; shift by w1_offset
                # because image 2 sits on the right half of the composite.
                if (tag == 'insert' or tag == 'replace') and char_width2 > 0:
                    start_x = int(x2 + j1 * char_width2)
                    end_x = int(x2 + j2 * char_width2)
                    cv2.rectangle(image_to_draw_on, (start_x + w1_offset, y2), (end_x + w1_offset, y2 + h2), (0, 0, 255), 2)

    def find_differences_and_annotate_generic(self, img1_path, img2_path, result_img_path=None, output_path='generated_difference_result.png', show_result=False):
        """
        Compare two images for textual differences and annotate them.

        Args:
            img1_path: path of the reference image.
            img2_path: path of the image to compare against it.
            result_img_path: optional path of an expected-result image used
                to score precision/recall of the generated annotations.
            output_path: where the annotated side-by-side composite is saved.
            show_result: if True, display the composite in a cv2 window.

        Returns:
            (differences dict keyed by a human-readable description,
             annotated composite image).

        Raises:
            FileNotFoundError: if either input image cannot be read.
        """
        print("开始图像差异检测...")

        # Load both originals up front.
        img1_color = cv2.imread(img1_path)
        img2_color = cv2.imread(img2_path)

        if img1_color is None or img2_color is None:
            raise FileNotFoundError("无法读取输入图像")

        h1, w1 = img1_color.shape[:2]

        # Align image 2 onto image 1.
        print("对齐图像中...")
        aligned_img2_color = self.align_images(img1_path, img2_path)

        # BUGFIX: align_images falls back to the raw second image when it cannot
        # align; if that image has different dimensions, the composite assignment
        # below would fail. Force it to image 1's size.
        if aligned_img2_color.shape[:2] != (h1, w1):
            aligned_img2_color = cv2.resize(aligned_img2_color, (w1, h1))

        # The aligned image must be on disk for the OCR preprocessing step.
        aligned_img2_temp_path = 'aligned_img2_temp.png'
        cv2.imwrite(aligned_img2_temp_path, aligned_img2_color)

        try:
            # OCR both images.
            print("对图像1进行OCR...")
            texts1_info = self.get_text_from_image(img1_path)
            print(f"图像1识别到 {len(texts1_info)} 个文本区域")
            for t_info in texts1_info:
                print(f"  '{t_info['text']}' @ {t_info['bbox']} (score: {t_info['score']:.2f})")

            print("\n对对齐后的图像2进行OCR...")
            texts2_info = self.get_text_from_image(aligned_img2_temp_path)
            print(f"图像2识别到 {len(texts2_info)} 个文本区域")
            for t_info in texts2_info:
                print(f"  '{t_info['text']}' @ {t_info['bbox']} (score: {t_info['score']:.2f})")

            # Side-by-side composite: image 1 on the left, aligned image 2 on the right.
            combined_img_display = np.zeros((h1, w1 * 2, 3), dtype=np.uint8)
            combined_img_display[:, :w1] = img1_color
            combined_img_display[:, w1:] = aligned_img2_color

            # Matching thresholds.
            iou_threshold_match = 0.3  # low IoU bar to tolerate imperfect alignment
            similarity_threshold_content = 0.85  # text-similarity bar for "same content"

            differences_detected = {}
            matched_texts2_indices = [False] * len(texts2_info)

            print("\n开始差异检测...")

            # For each text in image 1, look for its counterpart in image 2.
            for i, text1_item in enumerate(texts1_info):
                text1_bbox = text1_item['bbox']
                text1_content_original = text1_item['text'].strip()
                text1_content_normalized = self.normalize_text_for_comparison(text1_content_original)

                # Skip low-confidence or trivially short detections.
                confidence_threshold = self.get_dynamic_confidence_threshold(len(text1_content_normalized))
                if text1_item['score'] < confidence_threshold or len(text1_content_normalized) <= 1:
                    continue

                best_match_idx = -1
                max_iou = 0
                best_similarity = 0

                # Pick the best unmatched candidate in image 2.
                for j, text2_item in enumerate(texts2_info):
                    if matched_texts2_indices[j]:
                        continue

                    text2_content_normalized = self.normalize_text_for_comparison(text2_item['text'].strip())
                    confidence_threshold2 = self.get_dynamic_confidence_threshold(len(text2_content_normalized))

                    if text2_item['score'] < confidence_threshold2 or len(text2_content_normalized) <= 1:
                        continue

                    current_iou = self.calculate_iou(text1_bbox, text2_item['bbox'])
                    current_similarity = SequenceMatcher(None, text1_content_normalized, text2_content_normalized).ratio()

                    # Combine spatial overlap and textual similarity when ranking candidates.
                    if current_iou > max_iou or (current_iou >= 0.1 and current_similarity > best_similarity):
                        max_iou = current_iou
                        best_similarity = current_similarity
                        best_match_idx = j

                # Classify the match result.
                if best_match_idx != -1 and (max_iou >= iou_threshold_match or best_similarity >= similarity_threshold_content):
                    text2_item = texts2_info[best_match_idx]
                    text2_content_original = text2_item['text'].strip()

                    # Same region, different content → content mismatch.
                    if best_similarity < similarity_threshold_content:
                        diff_key = f"内容差异: '{text1_content_original}' vs '{text2_content_original}'"
                        differences_detected[diff_key] = {
                            'img1_text': text1_content_original, 'img1_bbox': text1_bbox,
                            'img2_text': text2_content_original, 'img2_bbox': text2_item['bbox'],
                            'type': 'content_mismatch'
                        }
                        print(f"  发现差异: {diff_key} (相似度: {best_similarity:.2f})")

                        # Highlight only the differing character spans.
                        self.highlight_detailed_differences(combined_img_display, text1_item, text2_item, w1)

                    matched_texts2_indices[best_match_idx] = True
                else:
                    # No counterpart found → text missing from image 2.
                    diff_key = f"图像2中缺失: '{text1_content_original}'"
                    differences_detected[diff_key] = {
                        'img1_text': text1_content_original, 'img1_bbox': text1_bbox,
                        'img2_text': "N/A", 'img2_bbox': None,
                        'type': 'missing_in_img2'
                    }
                    print(f"  发现差异: {diff_key}")

                    # For missing text, box the whole region on the left half.
                    x, y, w, h = text1_bbox
                    cv2.rectangle(combined_img_display, (x, y), (x + w, y + h), (0, 0, 255), 2)

            # Remaining unmatched texts in image 2 are additions.
            for j, text2_item in enumerate(texts2_info):
                if not matched_texts2_indices[j]:
                    text2_content_original = text2_item['text'].strip()
                    text2_content_normalized = self.normalize_text_for_comparison(text2_content_original)

                    confidence_threshold = self.get_dynamic_confidence_threshold(len(text2_content_normalized))
                    if text2_item['score'] >= confidence_threshold and len(text2_content_normalized) > 1:
                        diff_key = f"图像2中新增: '{text2_content_original}'"
                        differences_detected[diff_key] = {
                            'img1_text': "N/A", 'img1_bbox': None,
                            'img2_text': text2_content_original, 'img2_bbox': text2_item['bbox'],
                            'type': 'added_in_img2'
                        }
                        print(f"  发现差异: {diff_key}")

                        # For added text, box the whole region on the right half.
                        x, y, w, h = text2_item['bbox']
                        cv2.rectangle(combined_img_display, (x + w1, y), (x + w + w1, y + h), (0, 0, 255), 2)

            print(f"\n共检测到 {len(differences_detected)} 个差异")

            # Optional accuracy evaluation against a reference annotation image.
            if result_img_path and os.path.exists(result_img_path):
                print("\n评估检测结果...")
                self.evaluate_results(combined_img_display, result_img_path)

            # Persist the annotated composite.
            cv2.imwrite(output_path, combined_img_display)
            print(f"\n结果已保存到: {output_path}")

            # Optional interactive display.
            if show_result:
                cv2.imshow("Difference Detection Result", combined_img_display)
                cv2.waitKey(0)
                cv2.destroyAllWindows()

            return differences_detected, combined_img_display

        finally:
            # Always remove the temporary aligned image.
            if os.path.exists(aligned_img2_temp_path):
                os.remove(aligned_img2_temp_path)

    def evaluate_results(self, generated_img, result_img_path):
        """
        Score the generated annotations against an expected-result image by
        detecting red boxes in both and computing precision/recall/F1 over
        IoU-matched box pairs. Results are printed, not returned.
        """
        expected_result_img = cv2.imread(result_img_path)
        if expected_result_img is None:
            print("警告: 无法加载参考结果图像")
            return

        # Resize the reference so box coordinates are comparable.
        expected_result_img_resized = cv2.resize(expected_result_img,
                                               (generated_img.shape[1], generated_img.shape[0]),
                                               interpolation=cv2.INTER_AREA)

        # Find red rectangles via a two-range HSV mask (red wraps around hue 0/180).
        def detect_red_boxes(img):
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            lower_red1 = np.array([0, 100, 100])
            upper_red1 = np.array([10, 255, 255])
            lower_red2 = np.array([160, 100, 100])
            upper_red2 = np.array([179, 255, 255])
            red_mask = cv2.inRange(hsv, lower_red1, upper_red1) + cv2.inRange(hsv, lower_red2, upper_red2)
            contours, _ = cv2.findContours(red_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            return [cv2.boundingRect(cnt) for cnt in contours if cv2.contourArea(cnt) > 20]

        expected_boxes = detect_red_boxes(expected_result_img_resized)
        generated_boxes = detect_red_boxes(generated_img)

        print(f"期望标注框数量: {len(expected_boxes)}")
        print(f"生成标注框数量: {len(generated_boxes)}")

        # Greedy one-to-one matching of expected boxes to generated boxes.
        iou_threshold = 0.5
        matched_count = 0
        matched_generated = [False] * len(generated_boxes)

        for expected_box in expected_boxes:
            best_iou = 0
            best_match_idx = -1

            for i, generated_box in enumerate(generated_boxes):
                if matched_generated[i]:
                    continue

                iou = self.calculate_iou(expected_box, generated_box)
                if iou > best_iou:
                    best_iou = iou
                    best_match_idx = i

            if best_iou >= iou_threshold:
                matched_count += 1
                if best_match_idx != -1:
                    matched_generated[best_match_idx] = True

        precision = matched_count / len(generated_boxes) if len(generated_boxes) > 0 else 0
        recall = matched_count / len(expected_boxes) if len(expected_boxes) > 0 else 0
        f1_score = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0

        print(f"匹配的标注框数量: {matched_count}")
        print(f"精确率 (Precision): {precision:.2f}")
        print(f"召回率 (Recall): {recall:.2f}")
        print(f"F1分数: {f1_score:.2f}")

    def compare_images(self, img1_path, img2_path, result_path=None, output_path='generated_difference_result.png', show_result=False):
        """
        Top-level entry point: compare two images and annotate differences.

        Returns:
            (differences dict, annotated composite image), or ({}, None) on
            error — failures are reported on stdout rather than raised.
        """
        try:
            differences, result_img = self.find_differences_and_annotate_generic(
                img1_path, img2_path, result_path, output_path, show_result
            )
            return differences, result_img
        except Exception as e:
            print(f"图像比较过程中发生错误: {e}")
            import traceback
            traceback.print_exc()
            return {}, None


# Module-level convenience function for quick, one-call usage.
def compare_images(img1_path, img2_path, result_path=None, output_path='generated_difference_result.png', show_result=False, lang='en'):
    """
    One-shot helper: build an ImageDifferenceDetector and compare two images.

    Args:
        img1_path: path of the reference image.
        img2_path: path of the image to compare against it.
        result_path: optional expected-result image used for evaluation.
        output_path: where the annotated composite image is saved.
        show_result: if True, display the composite in a window.
        lang: OCR language code passed through to the detector.

    Returns:
        (differences dict, annotated composite image).
    """
    return ImageDifferenceDetector(lang=lang).compare_images(
        img1_path, img2_path, result_path, output_path, show_result
    )

if __name__ == "__main__":
    # 图片路径
    compare_1_path = 'compare_1.png'
    compare_2_path = 'compare_2.png'
    result_path = 'result.png'
    
    if not os.path.exists(compare_1_path):
        print(f"错误: 找不到文件 {compare_1_path}")
    elif not os.path.exists(compare_2_path):
        print(f"错误: 找不到文件 {compare_2_path}")
    else:
        print("开始图像比较...")
        differences, result_img = compare_images(
            compare_1_path, 
            compare_2_path, 
            result_path, 
            output_path='generated_difference_result.png',
            show_result=True
        )
        print(f"\n检测完成，共发现 {len(differences)} 个差异")