import cv2
import numpy as np
import pytesseract
from PIL import Image
from typing import List, Dict, Optional
import logging
import re

class TextRecognizer:
    """OCR-based text recognition for flowchart nodes.

    Crops each detected node out of a source image, enhances the crop
    (CLAHE contrast boost, denoising, Otsu binarization), runs Tesseract
    OCR with a Chinese+English model, then cleans the raw text and
    attaches it to the node dict together with a heuristic confidence
    score in ``[0.0, 1.0]``.

    Node dicts are expected to carry a ``'position'`` sub-dict with
    integer ``'x'``, ``'y'``, ``'width'``, ``'height'`` keys; this class
    adds ``'text'`` and ``'text_confidence'`` keys in place.
    """

    # Frequent OCR misreads (wrong -> correct) fixed after recognition.
    # Hoisted to class level so the mapping is built once, not on every
    # _post_process_text call.
    _TEXT_CORRECTIONS = {
        '用尸登录': '用户登录',
        '验证成力': '验证成功',
        '开蛤': '开始',
        '结柬': '结束',
        '处埋': '处理',
        '决茉': '决策',
        '流稈': '流程'
    }

    def __init__(self, tesseract_path: Optional[str] = None):
        """Initialize the recognizer.

        Args:
            tesseract_path: Explicit path to the tesseract executable.
                Only needed when the binary is not on the system PATH.
        """
        self.logger = logging.getLogger(__name__)

        # Point pytesseract at a specific binary if one was supplied.
        if tesseract_path:
            pytesseract.pytesseract.tesseract_cmd = tesseract_path

        # OCR configuration: LSTM engine, uniform block of text.
        # NOTE(review): tessedit_char_whitelist is documented as having no
        # effect with the LSTM engine selected by --oem 3 — confirm whether
        # this whitelist actually constrains recognition here.
        self.config = '--oem 3 --psm 6 -c tessedit_char_whitelist=0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz一二三四五六七八九十百千万亿是是否否真假开始结束处理决策流程输入输出用户登录验证成功失败错误异常'

    def extract_text_from_nodes(self, image: np.ndarray, nodes: List[Dict]) -> List[Dict]:
        """Run OCR on every node and store text + confidence in place.

        Args:
            image: Full source image (BGR or grayscale ndarray).
            nodes: Node dicts with a ``'position'`` entry.

        Returns:
            The same list, with ``'text'`` and ``'text_confidence'``
            added to each node.
        """
        self.logger.info("开始提取节点文本")

        for node in nodes:
            # Crop the node region (with margin) out of the full image.
            node_image = self._extract_node_region(image, node)

            # Guard against degenerate crops (node partially/entirely
            # outside the image): cv2 would raise on an empty array.
            # Mirrors the equivalent check in batch_extract_text.
            if node_image.size == 0:
                node['text'] = ""
                node['text_confidence'] = 0.0
                continue

            # Enhance the crop, recognize, then clean the raw OCR output.
            processed_image = self._preprocess_node_image(node_image)
            text = self._perform_ocr(processed_image)
            cleaned_text = self._post_process_text(text)

            node['text'] = cleaned_text
            node['text_confidence'] = self._calculate_text_confidence(cleaned_text, node_image)

        self.logger.info(f"完成 {len(nodes)} 个节点的文本提取")
        return nodes

    def _extract_node_region(self, image: np.ndarray, node: Dict) -> np.ndarray:
        """Crop the node's bounding box (plus a small margin) from the image.

        Returns an empty array when the node lies outside the image.
        """
        pos = node['position']
        x, y, w, h = pos['x'], pos['y'], pos['width'], pos['height']

        # Pad the box slightly so text touching the border is included.
        margin = 5
        x = max(0, x - margin)
        y = max(0, y - margin)
        # Clamp to the image bounds; the outer max(0, ...) prevents
        # negative extents when the box starts beyond the image edge
        # (the resulting slice is then explicitly empty).
        w = max(0, min(image.shape[1] - x, w + 2 * margin))
        h = max(0, min(image.shape[0] - y, h + 2 * margin))

        return image[y:y+h, x:x+w]

    def _preprocess_node_image(self, node_image: np.ndarray) -> np.ndarray:
        """Enhance a node crop to improve OCR accuracy.

        Pipeline: grayscale -> CLAHE contrast -> non-local-means
        denoising -> Otsu binarization -> morphological close.
        """
        # Convert to grayscale if the crop is color.
        if len(node_image.shape) == 3:
            gray = cv2.cvtColor(node_image, cv2.COLOR_BGR2GRAY)
        else:
            gray = node_image

        # Local contrast enhancement.
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        enhanced = clahe.apply(gray)

        # Denoise before thresholding.
        denoised = cv2.fastNlMeansDenoising(enhanced)

        # Otsu picks the binarization threshold automatically.
        _, binary = cv2.threshold(denoised, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # Morphological close with a 1x1 kernel to remove tiny specks.
        # NOTE(review): a 1x1 kernel is effectively a no-op — confirm a
        # larger kernel was intended.
        kernel = np.ones((1, 1), np.uint8)
        cleaned = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)

        return cleaned

    def _perform_ocr(self, image: np.ndarray) -> str:
        """Run Tesseract on a preprocessed crop; return "" on failure."""
        try:
            # Tesseract expects 8-bit images.
            if image.dtype != np.uint8:
                image = image.astype(np.uint8)

            # pytesseract takes PIL images.
            pil_image = Image.fromarray(image)

            # Simplified Chinese + English models.
            text = pytesseract.image_to_string(pil_image, config=self.config, lang='chi_sim+eng')

            return text.strip()

        except Exception as e:
            # Best-effort: a failed crop should not abort the whole batch.
            self.logger.warning(f"OCR识别失败: {e}")
            return ""

    def _post_process_text(self, text: str) -> str:
        """Normalize whitespace, strip punctuation, fix known misreads."""
        if not text:
            return ""

        # Collapse runs of whitespace to single spaces.
        text = re.sub(r'\s+', ' ', text)
        text = text.strip()

        # Keep only word characters, whitespace and CJK ideographs.
        text = re.sub(r'[^\w\s\u4e00-\u9fff]', '', text)

        # Apply the shared table of common OCR misreads.
        for wrong, correct in self._TEXT_CORRECTIONS.items():
            text = text.replace(wrong, correct)

        return text

    def _calculate_text_confidence(self, text: str, node_image: np.ndarray) -> float:
        """Heuristic confidence in [0.0, 1.0] for the recognized text.

        Combines a base score with bonuses for text length, image
        sharpness (Laplacian variance) and known flowchart keywords.
        """
        if not text:
            return 0.0

        confidence = 0.5  # base score for any non-empty result

        # Longer strings are less likely to be noise.
        if len(text) >= 2:
            confidence += 0.2

        # Sharpness bonus — only when the crop actually has pixels
        # (guards against degenerate crops when called directly).
        if node_image.size > 0:
            gray = cv2.cvtColor(node_image, cv2.COLOR_BGR2GRAY) if len(node_image.shape) == 3 else node_image
            laplacian_var = cv2.Laplacian(gray, cv2.CV_64F).var()

            if laplacian_var > 100:    # sharp image
                confidence += 0.2
            elif laplacian_var > 50:   # moderately sharp
                confidence += 0.1

        # Known flowchart vocabulary raises confidence.
        if any(keyword in text for keyword in ['开始', '结束', '处理', '决策', '流程']):
            confidence += 0.1

        return min(1.0, confidence)

    def extract_text_with_confidence(self, image: np.ndarray, nodes: List[Dict]) -> List[Dict]:
        """Extract text, then drop nodes with neither text nor confidence.

        A node is kept when it has non-empty text or a confidence above
        0.3 (empty-text nodes score 0.0, so they are filtered out).
        """
        nodes_with_text = self.extract_text_from_nodes(image, nodes)

        filtered_nodes = []
        for node in nodes_with_text:
            if node['text_confidence'] > 0.3 or node['text']:
                filtered_nodes.append(node)

        self.logger.info(f"文本提取完成，有效节点: {len(filtered_nodes)}")
        return filtered_nodes

    def batch_extract_text(self, image: np.ndarray, nodes: List[Dict]) -> List[Dict]:
        """Extract text from many nodes, tolerating per-node failures.

        Unlike extract_text_from_nodes, any exception while processing a
        single node is logged and that node is given empty text instead
        of aborting the whole batch.
        """
        self.logger.info(f"批量提取 {len(nodes)} 个节点的文本")

        processed_nodes = []

        for i, node in enumerate(nodes):
            try:
                node_image = self._extract_node_region(image, node)

                # Skip degenerate crops (node outside the image).
                if node_image.size == 0:
                    node['text'] = ""
                    node['text_confidence'] = 0.0
                    processed_nodes.append(node)
                    continue

                processed_image = self._preprocess_node_image(node_image)

                text = self._perform_ocr(processed_image)
                cleaned_text = self._post_process_text(text)

                confidence = self._calculate_text_confidence(cleaned_text, node_image)

                node['text'] = cleaned_text
                node['text_confidence'] = confidence

                processed_nodes.append(node)

                # Log progress after every 10 completed nodes.
                # (Previously fired at i == 0, announcing "1/N" at once.)
                if (i + 1) % 10 == 0:
                    self.logger.info(f"已处理 {i+1}/{len(nodes)} 个节点")

            except Exception as e:
                # Best-effort: record the failure and keep going.
                self.logger.error(f"处理节点 {i} 时出错: {e}")
                node['text'] = ""
                node['text_confidence'] = 0.0
                processed_nodes.append(node)

        return processed_nodes