import re
import mmh3
import numpy as np
from collections import defaultdict
from typing import List, Dict, Any, Optional, Callable
from .base_deduper import BaseDeduper, DedupMethodRegistry
from .dedupunion import UnionFind


def split(source: str):
    """Split text into its atomic units (individual characters)."""
    return [char for char in source]


def join(source: List[str]):
    """Concatenate the pieces of an n-gram back into a single string."""
    empty = ""
    return empty.join(source)


class Document:
    """
    Memory-conscious document wrapper that lazily computes character
    n-grams, a MinHash signature, and LSH band keys for near-duplicate
    detection.
    """

    def __init__(self, id: int, content: str, ngram_n: int = 3, band_size: int = 2, signature_len: int = 100):
        """
        Args:
            id: integer identifier; callers also use it as the index into
                the owning document list.
            content: raw text of the document.
            ngram_n: size of the character n-grams.
            band_size: number of signature slots per LSH band.
            signature_len: number of independent hash functions in the
                MinHash signature.
        """
        self.id = id
        self._content = content
        self.band_size = band_size
        self.ngram_n = ngram_n
        self.signature_len = signature_len
        # Lazily-computed caches, filled on first property access.
        self._ngrams = None
        self._min_hash = None
        self._lsh_bands = None

    @property
    def content(self):
        """Raw document text."""
        return self._content

    @property
    def ngrams(self):
        """Character n-gram bag of the document (computed on first access)."""
        if self._ngrams is None:
            self._ngrams = self.split_ngrams()
        return self._ngrams

    @property
    def min_hash(self):
        """MinHash signature of the document (computed on first access)."""
        if self._min_hash is None:
            self._min_hash = self.calc_min_hash()
        return self._min_hash

    @property
    def lsh_bands(self):
        """LSH band keys of the document (computed on first access)."""
        if self._lsh_bands is None:
            self._lsh_bands = self.prepare_lsh_bands()
        return self._lsh_bands

    def prepare_lsh_bands(self):
        """
        Slice the MinHash signature into fixed-size bands and encode each
        band as a hyphen-joined string key.

        A trailing partial band (when signature_len is not divisible by
        band_size) is dropped, since the banding scheme needs equal-size
        bands.
        """
        signature = self.min_hash
        band_count = len(signature) // self.band_size
        bands = []
        for i in range(band_count):
            chunk = signature[i * self.band_size:(i + 1) * self.band_size]
            bands.append("-".join(map(str, chunk)))
        return bands

    def calc_similarity(self, rhs) -> float:
        """
        Estimate the Jaccard similarity of two documents from their MinHash
        signatures: the fraction of signature positions where both hold the
        same minimum hash value.

        Note: the previous implementation intersected the *sets* of
        signature values, which ignores positions (equal values at
        different hash indices are not evidence of similarity) and so
        over-estimated similarity. Position-wise agreement is the standard
        unbiased MinHash estimator.
        """
        matches = sum(1 for a, b in zip(self.min_hash, rhs.min_hash) if a == b)
        return matches / self.signature_len

    def split_ngrams(self) -> List[str]:
        """
        Build the overlapping character n-gram list for this document.

        Symbols/punctuation are stripped first (word characters and
        whitespace are kept by the regex).

        Raises:
            ValueError: if the cleaned text is shorter than ngram_n so no
                n-gram can be produced. (ValueError subclasses Exception,
                so callers catching the old generic Exception still work.)
        """
        cleaned = re.sub(r'[^\w\s]', '', self._content)
        parts = split(cleaned)
        n = self.ngram_n
        ngrams = [join(parts[i:i + n]) for i in range(len(parts) - (n - 1))]
        if not ngrams:
            raise ValueError(
                f"[ERROR] 无法为数据[{self.id}] 生成 {n}-grams")
        return ngrams

    def calc_min_hash(self) -> List[int]:
        """
        Compute the MinHash signature with murmurhash3: each of the
        signature_len slots uses a distinct seed, i.e. an independent hash
        function, instead of explicit permutations.
        """
        max_u32 = np.iinfo(np.uint32).max
        signature = [max_u32] * self.signature_len
        for ngram in self.ngrams:
            for i in range(self.signature_len):
                # Seed i selects the i-th independent hash function.
                h = mmh3.hash(ngram, seed=i, signed=False)
                if h < signature[i]:
                    signature[i] = h
        return signature


class LSH(object):
    """
    Locality-Sensitive Hashing bucketer over precomputed MinHash bands.
    """

    def __init__(self):
        # Cached bucket list; populated by the first build() call.
        self._buckets = None

    def build(self, docs: List[Document], progress_callback: Optional[Callable] = None):
        """
        Group documents that share at least one LSH band key.

        Returns a list of candidate buckets (each a list of doc ids with
        2+ members); buckets with identical membership are emitted once.
        The result is cached, so repeated calls return the same list.
        """
        if self._buckets is not None:
            return self._buckets

        # Map each band key to the set of document ids that produced it.
        band_to_docs = defaultdict(set)
        doc_count = len(docs)
        for index, document in enumerate(docs, start=1):
            for band_key in document.lsh_bands:
                band_to_docs[band_key].add(document.id)
            if progress_callback:
                progress_callback(index, doc_count, "处理文档LSH bands")

        # Keep bands shared by 2+ docs and drop repeated membership sets,
        # preserving first-seen order.
        seen_keys = set()
        buckets = []
        for members in band_to_docs.values():
            if len(members) <= 1:
                continue
            membership_key = tuple(sorted(members))
            if membership_key in seen_keys:
                continue
            seen_keys.add(membership_key)
            buckets.append(list(members))

        self._buckets = buckets
        return self._buckets


class MinHashDeduper(BaseDeduper):
    """
    Near-duplicate removal via MinHash signatures + LSH banding.

    LSH narrows the pairwise comparisons to documents that share at least
    one signature band, making the approach practical for large corpora.
    """

    # Human-readable algorithm description (kept in Chinese for the UI).
    DESCRIPTION = "基于MinHash和LSH的近似去重算法，适用于大规模文本数据"

    # Parameter schema exposed to the configuration layer.
    PARAMETERS = {
        'sim_threshold': {
            'type': 'float',
            'description': '相似度阈值',
            'range': [0.1, 1.0],
            'step': 0.05
        },
        'ngram_n': {
            'type': 'int',
            'description': 'N-gram大小',
            'range': [2, 10]
        },
        'band_size': {
            'type': 'int',
            'description': 'LSH band大小',
            'range': [1, 20]
        },
        'signature_len': {
            'type': 'int',
            'description': 'MinHash签名长度',
            'range': [50, 500]
        }
    }

    # Defaults used when a parameter is not supplied via kwargs.
    DEFAULT_PARAMS = {
        'sim_threshold': 0.8,
        'ngram_n': 3,
        'band_size': 3,
        'signature_len': 100
    }

    def __init__(self, texts: List[str], **kwargs):
        """
        Args:
            texts: documents to deduplicate; the list index becomes the
                document id.
            **kwargs: algorithm parameters, see PARAMETERS / DEFAULT_PARAMS.

        Raises:
            ValueError: if band_size exceeds signature_len.
        """
        super().__init__(texts, **kwargs)

        # Resolve parameters, falling back to class defaults.
        self.sim_threshold = kwargs.get('sim_threshold', self.DEFAULT_PARAMS['sim_threshold'])
        self.ngram_n = kwargs.get('ngram_n', self.DEFAULT_PARAMS['ngram_n'])
        self.band_size = kwargs.get('band_size', self.DEFAULT_PARAMS['band_size'])
        self.signature_len = kwargs.get('signature_len', self.DEFAULT_PARAMS['signature_len'])

        if self.band_size > self.signature_len:
            raise ValueError(f"band_size ({self.band_size}) 不能大于 signature_len ({self.signature_len})")

        # Document objects are cheap to construct (signatures are computed
        # lazily), so one pass suffices; the previous batching only sliced
        # the list without reducing peak memory. The doc id doubles as the
        # list index, which _process_bucket relies on.
        self.docs = [
            Document(idx, text, self.ngram_n, self.band_size, self.signature_len)
            for idx, text in enumerate(texts)
        ]

        self.lsh = LSH()
        self.duplicated = set()   # ids of documents marked as duplicates
        self._seen_pairs = set()  # frozenset keys of recorded pairs, for O(1) dedup
        self._result = None       # cached execute() output

    def execute(self) -> List[str]:
        """Run deduplication and return the surviving documents' contents.

        The result is cached; repeated calls return the same list.
        """
        if self._result is not None:
            return self._result

        # Build LSH candidate buckets.
        buckets = self.lsh.build(self.docs, self.progress_callback)

        # Compare documents within each bucket.
        total_buckets = len(buckets)
        for i, bucket in enumerate(buckets):
            self._process_bucket(bucket)
            if self.progress_callback:
                self.progress_callback(i + 1, total_buckets, "处理相似文档桶")

        # Collect documents that were never marked as duplicates.
        self._result = [doc.content for doc in self.docs if doc.id not in self.duplicated]
        return self._result

    def _process_bucket(self, bucket):
        """Compare all documents in one LSH bucket, group similar ones via
        union-find, record similar pairs, and mark non-representatives as
        duplicates."""
        bucket_docs = [(doc_id, self.docs[doc_id]) for doc_id in bucket]
        # Sort by content length so comparisons proceed shortest-first.
        bucket_docs.sort(key=lambda item: len(item[1].content))

        # Union-find over the bucket's document ids.
        uf = UnionFind()
        for doc_id in bucket:
            uf.add(doc_id)

        # All pairs meeting the similarity threshold, with their scores.
        candidate_pairs = []

        # Phase 1: pairwise comparison, merging similar docs in the
        # union-find as we go.
        total_comparisons = len(bucket_docs) * (len(bucket_docs) - 1) // 2
        comparison_count = 0

        for i, (i_id, i_doc) in enumerate(bucket_docs):
            for j in range(i + 1, len(bucket_docs)):
                j_id, j_doc = bucket_docs[j]
                sim = i_doc.calc_similarity(j_doc)
                if sim >= self.sim_threshold:
                    candidate_pairs.append((i_id, j_id, sim))
                    uf.union(i_id, j_id)

                comparison_count += 1
                # Throttle progress updates to every 100 comparisons.
                if self.progress_callback and comparison_count % 100 == 0:
                    self.progress_callback(comparison_count, total_comparisons, "比较文档相似度")

        # Phase 2: group bucket members by their union-find root.
        groups = defaultdict(list)
        for doc_id in bucket:
            groups[uf.find(doc_id)].append(doc_id)

        # Record each similar pair once. Membership is checked against
        # _seen_pairs in O(1); the previous linear scan of duplicate_pairs
        # made this step quadratic in the number of recorded pairs.
        for i_id, j_id, sim in candidate_pairs:
            if i_id == j_id:
                continue  # self-pairs should not occur, skip defensively
            if not self._is_pair_recorded(i_id, j_id):
                self._seen_pairs.add(frozenset((i_id, j_id)))
                # duplicate_pairs is presumably initialized by BaseDeduper
                # — TODO confirm against the base class.
                self.duplicate_pairs.append({
                    "doc1_id": i_id,
                    "doc2_id": j_id,
                    "similarity": sim,
                    "doc1_content": self.docs[i_id].content,
                    "doc2_content": self.docs[j_id].content
                })

        # Keep the first member of each group as representative; mark the
        # rest as duplicates.
        for group in groups.values():
            if len(group) > 1:
                for doc_id in group[1:]:
                    self.duplicated.add(doc_id)

    def _is_pair_recorded(self, id1, id2):
        """Return True if the (unordered) document pair was already recorded."""
        return frozenset((id1, id2)) in self._seen_pairs


# Register the MinHash deduplication method under the key 'minhash'
DedupMethodRegistry.register('minhash', MinHashDeduper)
