import hashlib
import unicodedata
from typing import Any, Callable, Dict, List, Optional

from .base_deduper import BaseDeduper, DedupMethodRegistry


class MD5Deduper(BaseDeduper):
    """MD5-hash-based text deduplication.

    Exact mode hashes each normalized text with MD5 (optionally truncated
    to ``hash_length`` bytes) and drops any text whose hash was already
    seen.  Approximate mode (``use_rolling_hash=True``) hashes every
    ``window_size``-character window, treats each document as the set of
    its window hashes, and merges documents whose Jaccard similarity over
    those sets reaches ``similarity_threshold``.

    NOTE(review): ``self.texts``, ``self.duplicate_pairs`` and
    ``self.progress_callback`` are assumed to be initialized by
    ``BaseDeduper.__init__`` — confirm against the base class.
    """

    # Human-readable algorithm description exposed to the framework
    # (runtime metadata — text kept verbatim).
    DESCRIPTION = "基于MD5哈希的去重算法，适用于检测完全相同的文本和近似重复"

    # Parameter schema consumed by the surrounding framework/UI
    # (runtime metadata — descriptions kept verbatim).
    PARAMETERS = {
        'case_sensitive': {
            'type': 'bool',
            'description': '是否区分大小写',
            'options': [True, False]
        },
        'strip_whitespace': {
            'type': 'bool',
            'description': '是否去除首尾空白字符',
            'options': [True, False]
        },
        'normalize_unicode': {
            'type': 'bool',
            'description': '是否标准化Unicode字符',
            'options': [True, False]
        },
        'hash_length': {
            'type': 'int',
            'description': '使用的MD5哈希长度（字节数）',
            'range': [1, 16]
        },
        'use_rolling_hash': {
            'type': 'bool',
            'description': '是否使用滚动哈希检测近似重复',
            'options': [True, False]
        },
        'window_size': {
            'type': 'int',
            'description': '滚动哈希窗口大小（字符数）',
            'range': [10, 1000]
        },
        'similarity_threshold': {
            'type': 'float',
            'description': '近似重复检测的相似度阈值',
            'range': [0.1, 1.0],
            'step': 0.05
        }
    }

    # Fallback values used when a parameter is not supplied via kwargs.
    DEFAULT_PARAMS = {
        'case_sensitive': False,
        'strip_whitespace': True,
        'normalize_unicode': True,
        'hash_length': 16,
        'use_rolling_hash': False,
        'window_size': 50,
        'similarity_threshold': 0.8
    }

    def __init__(self, texts: List[str], **kwargs):
        """Validate parameters, preprocess *texts* and precompute hashes.

        Args:
            texts: the corpus to deduplicate.
            **kwargs: any subset of the keys in ``DEFAULT_PARAMS``.

        Raises:
            ValueError: if ``hash_length`` is outside [1, 16].
        """
        super().__init__(texts, **kwargs)

        # Pull each tunable out of kwargs, falling back to the defaults.
        self.case_sensitive = kwargs.get('case_sensitive', self.DEFAULT_PARAMS['case_sensitive'])
        self.strip_whitespace = kwargs.get('strip_whitespace', self.DEFAULT_PARAMS['strip_whitespace'])
        self.normalize_unicode = kwargs.get('normalize_unicode', self.DEFAULT_PARAMS['normalize_unicode'])
        self.hash_length = kwargs.get('hash_length', self.DEFAULT_PARAMS['hash_length'])
        self.use_rolling_hash = kwargs.get('use_rolling_hash', self.DEFAULT_PARAMS['use_rolling_hash'])
        self.window_size = kwargs.get('window_size', self.DEFAULT_PARAMS['window_size'])
        self.similarity_threshold = kwargs.get('similarity_threshold', self.DEFAULT_PARAMS['similarity_threshold'])

        # An MD5 digest is 16 bytes, so a longer truncation is meaningless.
        if not 1 <= self.hash_length <= 16:
            raise ValueError("hash_length 必须在 1 到 16 之间")

        # Normalize once up front; all hashing works on the processed form,
        # while the original texts are what gets returned to the caller.
        self.processed_texts = self._preprocess_texts(texts)
        self.hashes = self._compute_hashes(self.processed_texts)

    def _preprocess_texts(self, texts: List[str]) -> List[str]:
        """Normalize each text according to the configured flags."""
        processed = []
        for text in texts:
            if self.strip_whitespace:
                text = text.strip()
            if not self.case_sensitive:
                text = text.lower()
            if self.normalize_unicode:
                # NFKC folds compatibility variants (e.g. full-width
                # characters) into one canonical form.
                text = unicodedata.normalize('NFKC', text)
            processed.append(text)
        return processed

    def _truncated_md5(self, text: str) -> str:
        """Return the first ``hash_length`` bytes of MD5(text) as hex."""
        # Each byte corresponds to 2 hex characters in the digest string.
        return hashlib.md5(text.encode('utf-8')).hexdigest()[:self.hash_length * 2]

    def _compute_hashes(self, texts: List[str]) -> List[str]:
        """Compute the (truncated) MD5 hash of every text."""
        return [self._truncated_md5(text) for text in texts]

    def _compute_rolling_hashes(self, text: str) -> List[str]:
        """Hash every ``window_size``-character window of *text*.

        Texts shorter than the window contribute a single whole-text hash.
        """
        if len(text) < self.window_size:
            return [self._truncated_md5(text)]
        return [
            self._truncated_md5(text[i:i + self.window_size])
            for i in range(len(text) - self.window_size + 1)
        ]

    def _calculate_similarity(self, text1: str, text2: str) -> float:
        """Return the similarity of two texts in [0, 1].

        Rolling-hash mode uses Jaccard similarity over the window-hash
        sets; exact mode returns 1.0 iff the truncated hashes match.
        """
        if self.use_rolling_hash:
            hashes1 = set(self._compute_rolling_hashes(text1))
            hashes2 = set(self._compute_rolling_hashes(text2))
            union = len(hashes1 | hashes2)
            return len(hashes1 & hashes2) / union if union > 0 else 0.0
        # Hash the arguments directly instead of the original O(n)
        # ``processed_texts.index()`` lookup, which also raised ValueError
        # for texts not present in the corpus.  Equal texts hash equally,
        # so the result is unchanged for in-corpus inputs.
        return 1.0 if self._truncated_md5(text1) == self._truncated_md5(text2) else 0.0

    def execute(self) -> List[str]:
        """Run deduplication and return the surviving original texts."""
        if self.use_rolling_hash:
            return self._execute_approximate_dedup()
        return self._execute_exact_dedup()

    def _execute_exact_dedup(self) -> List[str]:
        """Exact dedup: keep the first text for each distinct hash."""
        seen_hashes = set()
        unique_texts = []
        duplicate_indices = []
        total_texts = len(self.hashes)

        for i, text_hash in enumerate(self.hashes):
            if text_hash not in seen_hashes:
                seen_hashes.add(text_hash)
                # Keep the original (unprocessed) text for the caller.
                unique_texts.append(self.texts[i])
            else:
                duplicate_indices.append(i)

            if self.progress_callback:
                self.progress_callback(i + 1, total_texts, "检测重复文本")

        # Record which pairs were considered duplicates.
        self._record_exact_duplicate_pairs(duplicate_indices)
        return unique_texts

    def _execute_approximate_dedup(self) -> List[str]:
        """Approximate dedup via rolling hashes and a union-find."""
        from .dedupunion import UnionFind

        uf = UnionFind()
        for i in range(len(self.texts)):
            uf.add(i)

        # Each document's window-hash set, computed exactly once.  (The
        # original recomputed both sets for every candidate pair.)
        doc_hash_sets = [
            set(self._compute_rolling_hashes(text))
            for text in self.processed_texts
        ]

        # Inverted index: window hash -> documents containing it.
        hash_to_docs: Dict[str, set] = {}
        for i, window_hashes in enumerate(doc_hash_sets):
            for window_hash in window_hashes:
                hash_to_docs.setdefault(window_hash, set()).add(i)

        # Any two documents sharing at least one window hash are candidates.
        # Sorting makes each pair deterministically (smaller, larger).
        candidate_pairs = set()
        for doc_indices in hash_to_docs.values():
            if len(doc_indices) > 1:
                ordered = sorted(doc_indices)
                for a in range(len(ordered)):
                    for b in range(a + 1, len(ordered)):
                        candidate_pairs.add((ordered[a], ordered[b]))

        total_pairs = len(candidate_pairs)
        for processed_pairs, (i, j) in enumerate(candidate_pairs, start=1):
            # Skip pairs already merged transitively via another document.
            if uf.find(i) != uf.find(j):
                hashes1, hashes2 = doc_hash_sets[i], doc_hash_sets[j]
                union = len(hashes1 | hashes2)
                similarity = len(hashes1 & hashes2) / union if union > 0 else 0.0

                if similarity >= self.similarity_threshold:
                    uf.union(i, j)
                    self.duplicate_pairs.append({
                        "doc1_id": i,
                        "doc2_id": j,
                        "similarity": similarity,
                        "doc1_content": self.texts[i],
                        "doc2_content": self.texts[j],
                        "match_type": "approximate"
                    })

            if self.progress_callback:
                self.progress_callback(processed_pairs, total_pairs, "比较文档相似度")

        # Keep one representative per union-find group: the first document
        # (in corpus order) whose root is seen.
        groups: Dict[int, int] = {}
        for i in range(len(self.texts)):
            root = uf.find(i)
            if root not in groups:
                groups[root] = i

        return [self.texts[i] for i in groups.values()]

    def _record_exact_duplicate_pairs(self, duplicate_indices: List[int]):
        """Record (first occurrence, duplicate) pairs for exact matches.

        NOTE(review): *duplicate_indices* is accepted for interface
        compatibility but unused — the pairs are recomputed from
        ``self.hashes`` directly.
        """
        hash_to_first_index = {}
        for i, text_hash in enumerate(self.hashes):
            if text_hash not in hash_to_first_index:
                hash_to_first_index[text_hash] = i
            else:
                first_index = hash_to_first_index[text_hash]
                self.duplicate_pairs.append({
                    "doc1_id": first_index,
                    "doc2_id": i,
                    "similarity": 1.0,  # exact match
                    "doc1_content": self.texts[first_index],
                    "doc2_content": self.texts[i],
                    "match_type": "exact"
                })


# Register the MD5 deduper with the framework-wide method registry
# under the key 'md5'.
DedupMethodRegistry.register('md5', MD5Deduper)
