"""
SimHash algorithm implementation for text similarity detection.
Optimized for Chinese text processing.
"""
import hashlib
import jieba
import numpy as np
from typing import List, Tuple, Optional
from collections import Counter
import re


class SimHash:
    """SimHash algorithm implementation with Chinese text optimization."""

    def __init__(self, hash_bits: int = 64):
        """
        Initialize SimHash with specified hash bits.

        Args:
            hash_bits: Number of bits in the fingerprint (default 64)
        """
        self.hash_bits = hash_bits
        self._stop_words = self._load_stop_words()

    def _load_stop_words(self) -> set:
        """Return the built-in set of common Chinese stop words."""
        return {
            '的', '了', '在', '是', '我', '你', '他', '她', '它', '们',
            '这', '那', '有', '个', '和', '与', '或', '但', '如果', '因为',
            '所以', '虽然', '然而', '不过', '就', '都', '也', '还', '很',
            '非常', '最', '更', '比', '被', '把', '给', '让', '对', '为',
            '到', '从', '由', '于', '上', '下', '中', '里', '内', '外'
        }

    def _preprocess_text(self, text: str) -> List[str]:
        """
        Preprocess text: normalization, segmentation, stop word removal.

        Args:
            text: Input text

        Returns:
            List of processed tokens (lowercased, stripped, stop words removed)
        """
        # Keep word characters, whitespace and CJK ideographs; drop the rest.
        text = re.sub(r'[^\w\s\u4e00-\u9fff]+', ' ', text)
        text = re.sub(r'\s+', ' ', text)
        text = text.strip().lower()

        # Chinese word segmentation.
        stripped = (token.strip() for token in jieba.cut(text))

        # Drop empty tokens and stop words. (Strip each token exactly once;
        # the previous version re-stripped per token and carried a redundant
        # len(...) > 0 check that duplicated the truthiness test.)
        return [token for token in stripped
                if token and token not in self._stop_words]

    def _hash_token(self, token: str) -> int:
        """
        Generate a hash value for a token.

        MD5 is used as a fast, uniformly-distributed hash here, not for
        security; `generate` consumes only the low `hash_bits` bits.

        Args:
            token: Token to hash

        Returns:
            Hash value as integer (128-bit MD5 digest)
        """
        hash_obj = hashlib.md5(token.encode('utf-8'))
        return int(hash_obj.hexdigest(), 16)

    def _extract_features(self, tokens: List[str]) -> List[Tuple[int, int]]:
        """
        Extract weighted features from tokens.

        Each distinct token contributes one feature; its weight is its
        occurrence count in the token list.

        Args:
            tokens: List of tokens

        Returns:
            List of (hash, weight) tuples
        """
        return [(self._hash_token(token), weight)
                for token, weight in Counter(tokens).items()]

    def generate(self, text: str) -> str:
        """
        Generate SimHash fingerprint for text.

        Args:
            text: Input text

        Returns:
            hash_bits-bit SimHash value as a zero-padded hex string
            (all zeros for empty or stop-word-only input)
        """
        # Hex form of the all-zero fingerprint, shared by both empty paths.
        zero_fingerprint = '0' * (self.hash_bits // 4)

        if not text or not text.strip():
            return zero_fingerprint

        # Preprocess and extract weighted features.
        tokens = self._preprocess_text(text)
        if not tokens:
            return zero_fingerprint

        features = self._extract_features(tokens)

        # Weighted bit-vote vector: each feature votes +weight on positions
        # where its hash has a 1 bit and -weight where it has a 0 bit.
        hash_vector = np.zeros(self.hash_bits, dtype=int)
        for hash_value, weight in features:
            for i in range(self.hash_bits):
                if (hash_value >> i) & 1:
                    hash_vector[i] += weight
                else:
                    hash_vector[i] -= weight

        # Bit i of the fingerprint is 1 iff the net vote for bit i is positive.
        fingerprint = 0
        for i in range(self.hash_bits):
            if hash_vector[i] > 0:
                fingerprint |= (1 << i)

        # Convert to hex string with proper zero padding.
        return hex(fingerprint)[2:].zfill(self.hash_bits // 4)

    @staticmethod
    def hamming_distance(hash1: str, hash2: str) -> int:
        """
        Calculate Hamming distance between two hash values.

        Args:
            hash1: First hash value (hex string)
            hash2: Second hash value (hex string)

        Returns:
            Number of differing bits
        """
        # XOR leaves a 1 in every bit position where the hashes differ;
        # bin(...).count('1') counts them at C speed, replacing the
        # hand-rolled shift-and-mask loop.
        return bin(int(hash1, 16) ^ int(hash2, 16)).count('1')

    def is_similar(self, hash1: str, hash2: str, threshold: int = 5) -> bool:
        """
        Check if two hashes are similar based on Hamming distance.

        Args:
            hash1: First hash value
            hash2: Second hash value
            threshold: Maximum Hamming distance for similarity (default 5)

        Returns:
            True if similar, False otherwise
        """
        return self.hamming_distance(hash1, hash2) <= threshold

    def calculate_similarity(self, hash1: str, hash2: str) -> float:
        """
        Calculate similarity percentage between two hashes.

        Args:
            hash1: First hash value
            hash2: Second hash value

        Returns:
            Similarity percentage (0.0-100.0)
        """
        distance = self.hamming_distance(hash1, hash2)
        similarity = (1 - distance / self.hash_bits) * 100
        # Clamp with a float so the return type is consistently float even
        # when the hex strings encode more bits than hash_bits.
        return max(0.0, similarity)


class TextSegmenter:
    """Utility class for text segmentation."""

    @staticmethod
    def segment_by_paragraph(text: str) -> List[str]:
        """
        Segment text by paragraphs.

        A paragraph break is two or more consecutive line breaks; LF, CRLF,
        and mixed line endings are all recognized.

        Args:
            text: Input text

        Returns:
            List of non-empty, stripped paragraphs
        """
        # (?:\r?\n){2,} covers \n\n, \r\n\r\n, and mixed sequences such as
        # \n\r\n — the previous pattern (\n\n+|\r\n\r\n+) missed the mixed
        # case, and its comment wrongly claimed it also split on spaces.
        paragraphs = re.split(r'(?:\r?\n){2,}', text)
        # Clean and filter empty paragraphs.
        return [p.strip() for p in paragraphs if p.strip()]

    @staticmethod
    def segment_by_sentence(text: str) -> List[str]:
        """
        Segment text by sentences.

        Args:
            text: Input text

        Returns:
            List of non-empty, stripped sentences
        """
        # Split on runs of Chinese (。！？) and Western (.!?) sentence enders.
        sentences = re.split(r'[。！？.!?]+', text)
        # Clean and filter empty sentences.
        return [s.strip() for s in sentences if s.strip()]

    @staticmethod
    def sliding_window(text: str, window_size: int = 200, step: int = 100) -> List[str]:
        """
        Create sliding window segments.

        Consecutive windows overlap by window_size - step characters; the
        last window may be shorter than window_size.

        Args:
            text: Input text
            window_size: Size of each window (characters)
            step: Step size for sliding

        Returns:
            List of text segments (empty for empty input)

        Raises:
            ValueError: If step is 0 (propagated from range()).
        """
        segments = []
        text_len = len(text)

        for start in range(0, text_len, step):
            window = text[start:start + window_size]
            if window:
                segments.append(window)
            # Stop once a window has reached the end of the text; further
            # steps would only produce redundant suffixes.
            if start + window_size >= text_len:
                break

        return segments