from typing import List, Dict, Any, Optional, Callable
from .base_deduper import BaseDeduper, DedupMethodRegistry


class ExactDeduper(BaseDeduper):
    """
    Exact-match deduplication algorithm.

    Keeps the first occurrence of each text; later texts that compare equal
    after optional normalization (case folding, whitespace stripping) are
    dropped and recorded as duplicate pairs.
    """

    # User-facing algorithm description (runtime data — kept as-is).
    DESCRIPTION = "基于精确文本匹配的去重算法，适用于需要完全相同的文本去重场景"

    # Parameter schema consumed by callers/UI to render configuration options.
    PARAMETERS = {
        'case_sensitive': {
            'type': 'bool',
            'description': '是否区分大小写',
            'options': [True, False]
        },
        'strip_whitespace': {
            'type': 'bool',
            'description': '是否去除首尾空白字符',
            'options': [True, False]
        }
    }

    # Defaults applied when a parameter is not supplied via **kwargs.
    DEFAULT_PARAMS = {
        'case_sensitive': False,
        'strip_whitespace': True
    }

    def __init__(self, texts: List[str], **kwargs):
        """
        Args:
            texts: Original texts to deduplicate (input order is preserved).
            **kwargs: Optional ``case_sensitive`` / ``strip_whitespace``
                overrides; everything is also forwarded to the base class.
        """
        super().__init__(texts, **kwargs)

        # Extract algorithm parameters, falling back to class defaults.
        self.case_sensitive: bool = kwargs.get(
            'case_sensitive', self.DEFAULT_PARAMS['case_sensitive'])
        self.strip_whitespace: bool = kwargs.get(
            'strip_whitespace', self.DEFAULT_PARAMS['strip_whitespace'])

        # Normalized copies used for comparison; self.texts keeps originals.
        self.processed_texts: List[str] = self._preprocess_texts(texts)

    def _preprocess_texts(self, texts: List[str]) -> List[str]:
        """Return normalized copies of *texts* per the configured options."""
        processed = []
        for text in texts:
            if self.strip_whitespace:
                text = text.strip()
            if not self.case_sensitive:
                text = text.lower()
            processed.append(text)
        return processed

    def execute(self) -> List[str]:
        """
        Run exact deduplication.

        Returns:
            The original texts with later exact duplicates removed
            (first occurrence wins, input order preserved).
        """
        seen = set()
        unique_texts: List[str] = []
        duplicate_indices: List[int] = []

        total_texts = len(self.processed_texts)

        for i, processed_text in enumerate(self.processed_texts):
            if processed_text not in seen:
                seen.add(processed_text)
                unique_texts.append(self.texts[i])  # keep the original form
            else:
                duplicate_indices.append(i)

            # Progress callback, one tick per examined text.
            if self.progress_callback:
                self.progress_callback(i + 1, total_texts, "检测重复文本")

        # Record duplicate-pair information for the indices found above.
        self._record_duplicate_pairs(duplicate_indices)

        return unique_texts

    def _record_duplicate_pairs(self, duplicate_indices: List[int]) -> None:
        """
        Append an entry to ``self.duplicate_pairs`` for each duplicate index,
        pairing it with the first occurrence of the same normalized text.

        Fix: the previous implementation ignored *duplicate_indices* entirely
        and re-derived the duplicate set by re-scanning every text; it now
        records exactly the indices detected by :meth:`execute`.
        """
        # Map each normalized text to the index of its first occurrence.
        first_index: Dict[str, int] = {}
        for i, processed_text in enumerate(self.processed_texts):
            first_index.setdefault(processed_text, i)

        for dup in duplicate_indices:
            first = first_index[self.processed_texts[dup]]
            self.duplicate_pairs.append({
                "doc1_id": first,
                "doc2_id": dup,
                "similarity": 1.0,  # exact match => similarity is 1.0
                "doc1_content": self.texts[first],
                "doc2_content": self.texts[dup],
                "match_type": "exact"
            })


# Register the exact-match dedup method in the registry under the key 'exact'.
DedupMethodRegistry.register('exact', ExactDeduper)
