"""Keyword extraction using TF-IDF algorithm"""

from typing import Dict, Any, Optional, List, Tuple
import jieba
import jieba.analyse
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
import logging
from pathlib import Path

from .base_cleaner import BaseCleaner

logger = logging.getLogger(__name__)


class KeywordExtractor(BaseCleaner):
    """Extract keywords from text using TF-IDF and jieba.

    Supports both Chinese and English text: Chinese text goes through
    jieba's TF-IDF (merged with TextRank results), English text through
    sklearn's TfidfVectorizer. Extracted keywords are attached to the
    record under ``cleaned_data['keywords']`` and
    ``cleaned_data['keywords_with_weights']``.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize keyword extractor.

        Args:
            config: Configuration with options:
                - top_n: Number of keywords to extract (default: 10)
                - use_jieba: Use jieba for Chinese text (default: True)
                - stopwords_file: Path to stopwords file
                - user_dict_file: Path to user dictionary for jieba
                - min_word_length: Minimum word length (default: 2)
        """
        super().__init__(config)
        self.top_n = self.config.get('top_n', 10)
        self.use_jieba = self.config.get('use_jieba', True)
        self.stopwords_file = self.config.get('stopwords_file')
        self.user_dict_file = self.config.get('user_dict_file')
        self.min_word_length = self.config.get('min_word_length', 2)

        # Stopwords are shared by the Chinese and English pipelines.
        self.stopwords = self._load_stopwords()

        # Optional jieba user dictionary (domain-specific vocabulary).
        if self.use_jieba and self.user_dict_file:
            self._load_user_dict()

        # TF-IDF vectorizer for English text. max_features keeps the
        # vocabulary small since only the top_n terms are ever used.
        self.tfidf_vectorizer = TfidfVectorizer(
            max_features=self.top_n * 2,
            stop_words='english',
            ngram_range=(1, 2)
        )

        # Running statistics, reported via get_stats().
        self.stats = {
            'processed': 0,
            'total_keywords': 0
        }

    def _load_stopwords(self) -> set:
        """Load stopwords (built-in Chinese defaults plus optional file).

        Returns:
            Set of stopwords.
        """
        stopwords = set()

        # Default Chinese stopwords: pronouns, particles, conjunctions
        # and other high-frequency function words.
        default_stopwords = {
            '的', '了', '在', '是', '我', '你', '他', '她', '它', '我们', '你们', '他们',
            '这', '那', '这个', '那个', '什么', '为什么', '怎么', '如何', '哪里', '哪儿',
            '和', '与', '或', '但', '但是', '然而', '因为', '所以', '如果', '那么',
            '被', '把', '给', '让', '叫', '去', '来', '到', '从', '对', '向', '往',
            '就', '都', '也', '还', '又', '再', '才', '只', '仅', '很', '太', '非常',
            '吗', '吧', '呢', '啊', '哦', '呀', '嗯', '哈'
        }
        stopwords.update(default_stopwords)

        # Merge additional stopwords from a user-supplied file, one word
        # per line; a missing or unreadable file is logged, not fatal.
        if self.stopwords_file:
            path = Path(self.stopwords_file)
            if path.exists():
                try:
                    with open(path, 'r', encoding='utf-8') as f:
                        file_stopwords = {line.strip() for line in f if line.strip()}
                        stopwords.update(file_stopwords)
                    logger.info("Loaded %d stopwords from file", len(file_stopwords))
                except Exception as e:
                    logger.error("Failed to load stopwords: %s", e)

        return stopwords

    def _load_user_dict(self):
        """Load a user dictionary into jieba (best-effort, errors logged)."""
        path = Path(self.user_dict_file)
        if path.exists():
            try:
                jieba.load_userdict(str(path))
                logger.info("Loaded jieba user dictionary from %s", path)
            except Exception as e:
                logger.error("Failed to load user dictionary: %s", e)

    def clean(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Extract keywords from text data.

        Args:
            data: Input data dictionary.

        Returns:
            The same dictionary, with keywords added under
            ``cleaned_data`` (unchanged if no text could be extracted).
        """
        # Combine title/content/description/tags into one text blob.
        text = self._extract_text_for_keywords(data)

        if not text:
            return data

        keywords_with_weights = self._extract_keywords(text)

        # Attach results; create the cleaned_data container if needed.
        if 'cleaned_data' not in data:
            data['cleaned_data'] = {}

        data['cleaned_data']['keywords'] = [kw for kw, _ in keywords_with_weights]
        data['cleaned_data']['keywords_with_weights'] = keywords_with_weights

        # Update statistics
        self.stats['processed'] += 1
        self.stats['total_keywords'] += len(keywords_with_weights)

        return data

    def _extract_text_for_keywords(self, data: Dict[str, Any]) -> str:
        """Extract text content for keyword extraction.

        Args:
            data: Data dictionary.

        Returns:
            Combined text content (space-joined).
        """
        text_parts = []

        # Title: repeated 3x so its terms carry more TF-IDF weight.
        if data.get('title'):
            text_parts.extend([str(data['title'])] * 3)

        # Main content: either a plain string or a dict with known
        # text-bearing keys.
        content = data.get('content')
        if content:
            if isinstance(content, str):
                text_parts.append(content)
            elif isinstance(content, dict):
                for key in ('description', 'text', 'body'):
                    if key in content:
                        text_parts.append(str(content[key]))

        # Top-level description (separate from content['description']).
        if data.get('description'):
            text_parts.append(str(data['description']))

        # Tags/categories: coerce each element to str — tags may contain
        # non-string values (e.g. numeric ids), which would otherwise
        # break the join below.
        tags = data.get('tags')
        if tags:
            if isinstance(tags, list):
                text_parts.extend(str(tag) for tag in tags)
            else:
                text_parts.append(str(tags))

        return ' '.join(text_parts)

    def _extract_keywords(self, text: str) -> List[Tuple[str, float]]:
        """Extract keywords with weights from text.

        Args:
            text: Input text.

        Returns:
            List of (keyword, weight) tuples, at most ``top_n`` long.
        """
        if not text:
            return []

        # Simple language detection: any CJK Unified Ideograph means
        # the Chinese pipeline is used.
        has_chinese = any('\u4e00' <= char <= '\u9fff' for char in text)

        if has_chinese and self.use_jieba:
            return self._extract_chinese_keywords(text)
        return self._extract_english_keywords(text)

    def _extract_chinese_keywords(self, text: str) -> List[Tuple[str, float]]:
        """Extract keywords from Chinese text using jieba.

        Merges jieba TF-IDF results with TextRank results (TF-IDF takes
        priority; TextRank weights are down-scaled by 0.8).

        Args:
            text: Chinese text.

        Returns:
            List of (keyword, weight) tuples sorted by weight descending.
        """
        try:
            # jieba's built-in TF-IDF, restricted to content-bearing POS
            # tags (nouns, verbs, adjectives, place/person/org names).
            keywords = jieba.analyse.extract_tags(
                text,
                topK=self.top_n,
                withWeight=True,
                allowPOS=('n', 'v', 'a', 'ns', 'nr', 'nt')
            )

            # Filter by stopwords and minimum length.
            filtered_keywords = []
            for word, weight in keywords:
                if (word not in self.stopwords and
                        len(word) >= self.min_word_length):
                    filtered_keywords.append((word, round(weight, 4)))

            # Also run TextRank to catch keywords TF-IDF misses.
            textrank_keywords = jieba.analyse.textrank(
                text,
                topK=self.top_n // 2,
                withWeight=True
            )

            # Merge results (TF-IDF has priority). Apply the same
            # stopword AND length filters as above so both paths are
            # consistent.
            keyword_dict = {kw: w for kw, w in filtered_keywords}
            for word, weight in textrank_keywords:
                if (word not in keyword_dict and
                        word not in self.stopwords and
                        len(word) >= self.min_word_length):
                    # Lower weight for TextRank results.
                    keyword_dict[word] = round(weight * 0.8, 4)

            # Sort by weight and return top N.
            sorted_keywords = sorted(
                keyword_dict.items(),
                key=lambda x: x[1],
                reverse=True
            )[:self.top_n]

            return sorted_keywords

        except Exception as e:
            logger.error("Error extracting Chinese keywords: %s", e)
            return []

    def _extract_english_keywords(self, text: str) -> List[Tuple[str, float]]:
        """Extract keywords from English text using sklearn.

        Args:
            text: English text.

        Returns:
            List of (keyword, weight) tuples sorted by score descending.
        """
        try:
            # Fit on the single document; with one document TF-IDF
            # reduces to normalized term frequency, which is sufficient
            # for ranking terms within the document.
            tfidf_matrix = self.tfidf_vectorizer.fit_transform([text])

            feature_names = self.tfidf_vectorizer.get_feature_names_out()
            scores = tfidf_matrix.toarray()[0]

            # Pair each feature with its score, filtering by length and
            # stopwords.
            keyword_scores = []
            for keyword, score in zip(feature_names, scores):
                if (score > 0 and
                        len(keyword) >= self.min_word_length and
                        keyword.lower() not in self.stopwords):
                    keyword_scores.append((keyword, round(float(score), 4)))

            # Sort by score and return top N.
            keyword_scores.sort(key=lambda x: x[1], reverse=True)
            return keyword_scores[:self.top_n]

        except ValueError as e:
            # Expected for all-stopword/empty-vocabulary input; not an
            # error worth alerting on.
            logger.debug("No English keywords extracted: %s", e)
            return []
        except Exception as e:
            logger.error("Error extracting English keywords: %s", e)
            return []

    def get_stats(self) -> Dict[str, Any]:
        """Get extraction statistics.

        Returns:
            Statistics dictionary with an ``extraction`` section
            containing processed count, total keywords, and (when at
            least one record was processed) the average keywords per
            record.
        """
        stats = super().get_stats()
        stats['extraction'] = self.stats.copy()
        if self.stats['processed'] > 0:
            stats['extraction']['avg_keywords'] = (
                self.stats['total_keywords'] / self.stats['processed']
            )
        return stats