import re
import warnings
from typing import Any, Dict, List

import numpy as np
from PIL import Image

warnings.filterwarnings('ignore')

# Optional dependency: PaddleOCR, used for image OCR.
try:
    import paddleocr
    PADDLEOCR_AVAILABLE = True
except ImportError:
    PADDLEOCR_AVAILABLE = False
    print("警告: PaddleOCR未安装，图像OCR功能将不可用")

# Optional dependency: transformers, used for model-based NER.
try:
    from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification
    TRANSFORMERS_AVAILABLE = True
except ImportError:
    TRANSFORMERS_AVAILABLE = False
    print("警告: transformers未安装，NER功能将使用规则匹配")

class DetectionService:
    """Multimodal sensitive-information detection service.

    Combines rule-based regex matching, an optional transformer NER
    pipeline and optional PaddleOCR to locate sensitive data (Chinese ID
    numbers, phone numbers, bank cards, e-mail addresses, postal
    addresses) in plain text and in images.
    """

    def __init__(self):
        # --- OCR engine (optional) -------------------------------------
        self.ocr = None
        if PADDLEOCR_AVAILABLE:
            try:
                # Newer PaddleOCR releases removed the show_log kwarg, so
                # it is intentionally not passed here.
                self.ocr = paddleocr.PaddleOCR(use_angle_cls=True, lang='ch')
            except Exception as e:
                print(f"OCR初始化失败: {e}")
                # Retry without the angle classifier — the most common
                # source of initialisation failures.
                try:
                    self.ocr = paddleocr.PaddleOCR(lang='ch')
                except Exception as e2:
                    print(f"OCR初始化再次失败: {e2}")
                    self.ocr = None

        # --- NER pipeline (optional, augments the regex rules) ---------
        self.ner_pipeline = None
        if TRANSFORMERS_AVAILABLE:
            try:
                self.ner_tokenizer = AutoTokenizer.from_pretrained("dslim/bert-base-NER")
                self.ner_model = AutoModelForTokenClassification.from_pretrained("dslim/bert-base-NER")
                self.ner_pipeline = pipeline("ner",
                                             model=self.ner_model,
                                             tokenizer=self.ner_tokenizer,
                                             aggregation_strategy="simple")
            except Exception as e:
                print(f"警告: NER模型加载失败，将使用规则匹配: {e}")
                self.ner_pipeline = None

        # Regex rules per category.  Every purely numeric pattern is
        # wrapped in (?<!\d) … (?!\d) so it only matches a *complete*
        # digit run: without the guards an 18-digit ID number would also
        # be reported as a 15-digit ID and as a bank card, and a mobile
        # number embedded inside a longer digit string would match too.
        self.sensitive_patterns = {
            'id_card': [
                r'(?<!\d)\d{17}[0-9Xx](?![0-9Xx])',  # 18-digit Chinese ID
                r'(?<!\d)\d{15}(?!\d)',              # legacy 15-digit ID
            ],
            'phone': [
                r'(?<!\d)1[3-9]\d{9}(?!\d)',         # mobile number
                r'(?<!\d)\d{3,4}-\d{7,8}(?!\d)',     # landline number
            ],
            'bank_card': [
                r'(?<!\d)\d{16,19}(?!\d)',           # bank card number
            ],
            'email': [
                r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}',
            ],
            'address': [
                # Heuristic: >=5 CJK chars followed (lazily) by an
                # administrative/street suffix character.
                r'[\u4e00-\u9fa5]{5,}.*?(省|市|区|县|街道|路|号)',
            ]
        }

    def detect_text(self, text: str) -> Dict[str, Any]:
        """Detect sensitive information in ``text``.

        Returns a dict with ``detected_items`` (each having type,
        content, start, end, confidence), ``count``, ``risk_level``
        ('low' | 'medium' | 'high') and ``type`` = 'text'.
        """
        detected_items = []

        # Rule-based matching.
        for category, patterns in self.sensitive_patterns.items():
            for pattern in patterns:
                for match in re.finditer(pattern, text):
                    detected_items.append({
                        'type': category,
                        'content': match.group(),
                        'start': match.start(),
                        'end': match.end(),
                        'confidence': 0.9
                    })

        # Model-based matching (only when the pipeline loaded).
        if self.ner_pipeline:
            try:
                for entity in self.ner_pipeline(text):
                    if entity.get('score', 0) > 0.7:
                        detected_items.append({
                            'type': entity.get('entity_group', 'unknown'),
                            'content': entity.get('word', ''),
                            # Aggregated entities may carry None offsets;
                            # coerce them to ints so dedup keys stay sane.
                            'start': int(entity.get('start') or 0),
                            'end': int(entity.get('end') or 0),
                            # score is a numpy float32 — cast so the result
                            # dict stays JSON-serializable.
                            'confidence': float(entity.get('score', 0)),
                        })
            except Exception as e:
                print(f"NER处理错误: {e}")

        # De-duplicate on (type, span).
        unique_items = []
        seen = set()
        for item in detected_items:
            key = (item['type'], item.get('start', 0), item.get('end', 0))
            if key not in seen:
                seen.add(key)
                unique_items.append(item)

        risk_level = self._calculate_risk_level(unique_items)

        return {
            'detected_items': unique_items,
            'count': len(unique_items),
            'risk_level': risk_level,
            'type': 'text'
        }

    def detect_image(self, image: "Image.Image") -> Dict[str, Any]:
        """Detect sensitive information in an image.

        Runs OCR to extract text, feeds the text through
        :meth:`detect_text`, and maps each hit back to the bounding box
        of the first OCR region whose text contains it.  Returns the
        same shape of dict as ``detect_text`` plus ``extracted_text``.
        """
        detected_items = []
        full_text = ""
        text_regions = []

        # OCR text extraction (skipped entirely when OCR is unavailable).
        if self.ocr:
            try:
                # PaddleOCR expects an ndarray (or a file path), not a PIL
                # image, so convert before calling it.
                ocr_result = self.ocr.ocr(np.asarray(image.convert("RGB")), cls=True)

                # PaddleOCR returns [None] when no text is found — guard
                # against iterating over None.
                lines = ocr_result[0] if ocr_result and ocr_result[0] else []
                for line in lines:
                    if line:
                        text = line[1][0]
                        bbox = line[0]  # quadrilateral corner coordinates
                        full_text += text + " "
                        text_regions.append({
                            'text': text,
                            'bbox': bbox
                        })
            except Exception as e:
                print(f"OCR处理错误: {e}")

        # Detect sensitive items in the concatenated OCR text.
        if full_text.strip():
            text_detection = self.detect_text(full_text)

            # Map each textual hit back to an image region.
            for item in text_detection.get('detected_items', []):
                for region in text_regions:
                    if item['content'] in region['text']:
                        detected_items.append({
                            'type': item['type'],
                            'content': item['content'],
                            'bbox': region['bbox'],
                            'confidence': item['confidence']
                        })
                        break

        # Shape-based detection (e.g. ID-card outlines) would require an
        # object-detection model (YOLO etc.) and is not implemented here.

        risk_level = self._calculate_risk_level(detected_items)

        return {
            'detected_items': detected_items,
            'count': len(detected_items),
            'risk_level': risk_level,
            'type': 'image',
            'extracted_text': full_text.strip()
        }

    def _calculate_risk_level(self, items: List[Dict]) -> str:
        """Derive an overall risk level from the detected items.

        High: any ID/bank-card hit, or 3+ items.
        Medium: any phone/e-mail hit, or 2+ items.
        Low: otherwise (including no items).
        """
        if not items:
            return 'low'

        high_risk_types = {'id_card', 'bank_card'}
        medium_risk_types = {'phone', 'email'}

        has_high_risk = any(item['type'] in high_risk_types for item in items)
        has_medium_risk = any(item['type'] in medium_risk_types for item in items)

        if has_high_risk or len(items) >= 3:
            return 'high'
        elif has_medium_risk or len(items) >= 2:
            return 'medium'
        else:
            return 'low'

