import re
from typing import List, Dict, Any, Optional, Callable, Tuple, Iterable

import mmh3

from .base_deduper import BaseDeduper, DedupMethodRegistry
from .dedupunion import UnionFind


def tokenize(text: str, mode: str = "word", ngram_n: int = 3) -> Iterable[str]:
    """Tokenize input text into hashable features.

    Args:
        text: input string.
        mode: 'word' (default) for lowercase word tokens, or 'char' for
            character n-grams with punctuation and whitespace stripped.
        ngram_n: size of character n-grams, used only when mode='char'.

    Returns:
        A list of string features; an empty list for empty/blank input.
    """
    # Collapse runs of whitespace so word boundaries are uniform.
    normalized = re.sub(r"[\s]+", " ", text)
    if mode == "char":
        # Drop punctuation, then spaces, so n-grams span word boundaries.
        s = re.sub(r"[^\w\s]", "", normalized)
        s = s.replace(" ", "")
        if ngram_n <= 1:
            return list(s)
        if len(s) < ngram_n:
            # Bug fix: previously a non-empty string shorter than one n-gram
            # produced no features at all, so every such short text hashed
            # identically (to 0) downstream. Emit the whole string instead.
            return [s] if s else []
        return [s[i:i + ngram_n] for i in range(len(s) - ngram_n + 1)]
    else:
        # Word tokens: lowercased; punctuation excluded by \w matching.
        return re.findall(r"[\w]+", normalized.lower())


def hamming_distance(x: int, y: int) -> int:
    """Return the number of bit positions at which x and y differ."""
    return bin(x ^ y).count("1")


class SimHashDeduper(BaseDeduper):
    """SimHash-based approximate (near-duplicate) deduplication.

    Suited to similarity detection over large text collections: a
    segmented inverted index (blocking over k+1 hash partitions) prunes
    the candidate-pair space, and an exact Hamming-distance check makes
    the final duplicate decision.
    """

    # Runtime metadata consumed by the registry/UI; these strings are part
    # of program output and are intentionally left unmodified.
    DESCRIPTION = "基于SimHash的近似去重算法，支持64/128位，按汉明距离阈值聚类"

    PARAMETERS = {
        'bits': {
            'type': 'int',
            'description': 'SimHash位数（64或128）',
            'range': [32, 128]
        },
        'max_hamming_distance': {
            'type': 'int',
            'description': '最大汉明距离（阈值）',
            'range': [0, 16]
        },
        'tokenizer': {
            'type': 'str',
            'description': '分词方式（word/char）',
            'options': ['word', 'char']
        },
        'ngram_n': {
            'type': 'int',
            'description': '字符n-gram大小（当tokenizer=char生效）',
            'range': [2, 10]
        }
    }

    DEFAULT_PARAMS = {
        'bits': 64,
        'max_hamming_distance': 3,
        'tokenizer': 'word',
        'ngram_n': 3
    }

    def __init__(self, texts: List[str], **kwargs):
        """Validate parameters and precompute a SimHash per input text.

        Args:
            texts: documents to deduplicate.
            **kwargs: optional overrides for DEFAULT_PARAMS
                (bits, max_hamming_distance, tokenizer, ngram_n).
        """
        super().__init__(texts, **kwargs)
        self.bits: int = int(kwargs.get('bits', self.DEFAULT_PARAMS['bits']))
        if self.bits not in (32, 64, 128):
            # Unsupported width: fall back to the 64-bit default.
            # NOTE(review): 32 is accepted here although the PARAMETERS
            # description documents only 64/128 — confirm intended range.
            self.bits = 64
        self.max_hamming: int = int(kwargs.get('max_hamming_distance', self.DEFAULT_PARAMS['max_hamming_distance']))
        self.tokenizer: str = kwargs.get('tokenizer', self.DEFAULT_PARAMS['tokenizer'])
        self.ngram_n: int = int(kwargs.get('ngram_n', self.DEFAULT_PARAMS['ngram_n']))

        # Precompute simhash for each text (done once; execute() reuses them).
        self._hashes: List[int] = [self._simhash(t) for t in self.texts]
        # Cached result of execute(); None until first run.
        self._result: Optional[List[str]] = None

    def _feature_hash(self, token: str) -> int:
        """Hash one token to an unsigned integer of `self.bits` width."""
        if self.bits == 128:
            # signed=False makes mmh3 return the value already in the
            # unsigned 128-bit space, so no masking is needed here.
            return mmh3.hash128(token, signed=False)
        elif self.bits == 64:
            # hash64 returns a pair of 64-bit halves; use the first.
            return mmh3.hash64(token, signed=False)[0]
        else:  # 32
            return mmh3.hash(token, signed=False) & 0xFFFFFFFF

    def _simhash(self, text: str) -> int:
        """Compute the SimHash fingerprint of `text`.

        Classic algorithm: each feature's hash bit i votes +1/-1 on
        weight position i; output bit i is set iff the summed weight is
        non-negative. Featureless (e.g. empty) text maps to 0.
        """
        feats = tokenize(text, self.tokenizer, self.ngram_n)
        if not feats:
            return 0

        # Per-bit vote accumulator.
        vec = [0] * self.bits
        for tok in feats:
            h = self._feature_hash(tok)
            for i in range(self.bits):
                bit = (h >> i) & 1
                vec[i] += 1 if bit else -1
        # Build the final hash from the sign of each accumulated weight
        # (ties count as a set bit).
        out = 0
        for i in range(self.bits):
            if vec[i] >= 0:
                out |= (1 << i)
        return out

    def _partitions(self) -> List[Tuple[int, int]]:
        """Compute partitions (start, length) for blocking.

        Using k+1 partitions guarantees recall for Hamming <= k: by the
        pigeonhole principle, two hashes within distance k must agree
        exactly on at least one partition. Remainder bits are spread one
        per partition, so lengths differ by at most 1.
        """
        k = max(0, self.max_hamming)
        p = min(self.bits, max(1, k + 1))
        base = self.bits // p
        rem = self.bits % p
        parts = []
        start = 0
        for i in range(p):
            length = base + (1 if i < rem else 0)
            parts.append((start, length))
            start += length
        return parts

    def _block_key(self, h: int, start: int, length: int) -> int:
        """Extract the `length`-bit slice of `h` beginning at bit `start`."""
        mask = ((1 << length) - 1) << start
        return (h & mask) >> start

    def _candidate_pairs(self) -> Iterable[Tuple[int, int]]:
        """Yield unique candidate index pairs (a, b) with a < b.

        Documents sharing any partition's bit slice land in the same
        bucket; only such pairs are emitted, so most dissimilar pairs
        are never compared.
        """
        # Build one inverted index (slice value -> doc ids) per partition.
        parts = self._partitions()
        inverted: List[Dict[int, List[int]]] = [dict() for _ in parts]
        for idx, h in enumerate(self._hashes):
            for p_idx, (s, l) in enumerate(parts):
                key = self._block_key(h, s, l)
                bucket = inverted[p_idx].setdefault(key, [])
                bucket.append(idx)

        # Emit pairs from buckets; `seen` deduplicates pairs that share
        # more than one partition.
        seen = set()
        for inv in inverted:
            for ids in inv.values():
                if len(ids) < 2:
                    continue
                ids_sorted = sorted(ids)
                for i in range(len(ids_sorted)):
                    for j in range(i + 1, len(ids_sorted)):
                        a, b = ids_sorted[i], ids_sorted[j]
                        key = (a, b)
                        if key not in seen:
                            seen.add(key)
                            yield a, b

    def execute(self) -> List[str]:
        """Run deduplication; return one representative text per cluster.

        Candidate pairs from the blocking index are verified with an
        exact Hamming-distance check; verified duplicates are merged in
        a union-find, and the first-seen text of each connected
        component is kept. The result is cached across calls.

        Returns:
            Deduplicated texts, in first-occurrence order.
        """
        if self._result is not None:
            return self._result

        uf = UnionFind()
        for i in range(len(self.texts)):
            uf.add(i)

        # Materialize pairs so `total` is known for progress reporting.
        pairs = list(self._candidate_pairs())
        total = len(pairs)
        processed = 0

        for i, j in pairs:
            # Early skip if already in same set (transitively merged).
            if uf.find(i) == uf.find(j):
                processed += 1
                if self.progress_callback and processed % 100 == 0:
                    self.progress_callback(processed, total, "比较文档相似度")
                continue

            d = hamming_distance(self._hashes[i], self._hashes[j])
            if d <= self.max_hamming:
                uf.union(i, j)
                # NOTE(review): self.duplicate_pairs is presumably created by
                # BaseDeduper.__init__ — confirm; it records verified pairs
                # for reporting. Similarity is 1 - normalized Hamming distance.
                self.duplicate_pairs.append({
                    'doc1_id': i,
                    'doc2_id': j,
                    'similarity': max(0.0, 1.0 - d / float(self.bits)),
                    'hamming_distance': d,
                    'doc1_content': self.texts[i],
                    'doc2_content': self.texts[j],
                    'match_type': 'simhash'
                })

            processed += 1
            if self.progress_callback and processed % 100 == 0:
                self.progress_callback(processed, total, "比较文档相似度")

        # Collect one representative per connected component: the lowest
        # index, because i iterates in ascending order.
        reps = {}
        for i in range(len(self.texts)):
            r = uf.find(i)
            if r not in reps:
                reps[r] = i
        self._result = [self.texts[idx] for idx in reps.values()]
        return self._result


# Register the SimHash dedup method under the key 'simhash'.
DedupMethodRegistry.register('simhash', SimHashDeduper)
