"""自然语言处理分析器

提供文本分析、情感分析、主题建模和关键词提取功能。
"""

import re
import logging
from typing import List, Dict, Any, Optional, Tuple
from collections import Counter
import pandas as pd
import numpy as np

try:
    from textblob import TextBlob
    from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
    from sklearn.decomposition import LatentDirichletAllocation
    from sklearn.cluster import KMeans
    from wordcloud import WordCloud
    import jieba
    import jieba.analyse
    NLP_DEPENDENCIES_AVAILABLE = True
except ImportError as e:
    logging.warning(f"某些NLP依赖包未安装: {e}")
    logging.warning("请运行: pip install textblob scikit-learn wordcloud jieba")
    NLP_DEPENDENCIES_AVAILABLE = False
    # Placeholder classes so module-level names still resolve without the
    # optional dependencies; callers guard on NLP_DEPENDENCIES_AVAILABLE.
    class TextBlob:
        def __init__(self, text): pass
        @property
        def sentiment(self): return type('obj', (object,), {'polarity': 0, 'subjectivity': 0})()
    class CountVectorizer:
        def __init__(self, **kwargs): pass
        def fit_transform(self, data): return []
    # FIX: TfidfVectorizer and KMeans previously had no placeholders, so
    # extract_keywords() and text_similarity_analysis() raised NameError
    # instead of degrading gracefully like the other fallback paths.
    class TfidfVectorizer(CountVectorizer):
        pass
    class LatentDirichletAllocation:
        def __init__(self, **kwargs): pass
        def fit_transform(self, data): return []
    class KMeans:
        def __init__(self, **kwargs): pass
        def fit_predict(self, data): return []
    # NOTE(review): jieba gets no placeholder (it is a module, not a class);
    # all jieba call sites are wrapped in try/except and fall back safely.

class NLPAnalyzer:
    """Natural language processing analyzer.

    Provides text analysis, sentiment analysis, topic modeling and keyword
    extraction for Chinese or English text.
    """

    def __init__(self, language: str = 'chinese'):
        """
        Initialize the NLP analyzer.

        Args:
            language: text language, either 'chinese' or 'english'
        """
        self.language = language
        self.logger = logging.getLogger(__name__)

        # BUG FIX: these stopword sets were previously defined in unreachable
        # code placed after the return statements of analyze(), so the
        # attributes were never created and tokenize_text() crashed with
        # AttributeError. They belong here in the constructor.

        # Chinese stopwords
        self.chinese_stopwords = {
            '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个',
            '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好',
            '自己', '这', '那', '他', '她', '它', '们', '这个', '那个', '什么', '怎么',
            '为什么', '因为', '所以', '但是', '然后', '还是', '或者', '如果', '虽然',
            '可以', '应该', '能够', '需要', '希望', '觉得', '认为', '知道', '想要'
        }

        # English stopwords
        self.english_stopwords = {
            'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
            'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
            'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their',
            'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',
            'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
            'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
            'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
            'at', 'by', 'for', 'with', 'through', 'during', 'before', 'after', 'above',
            'below', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
            'further', 'then', 'once'
        }
    
    def analyze(self, data, text_columns: Optional[List[str]] = None, enable_sentiment: bool = False) -> Dict[str, Any]:
        """统一的分析接口
        
        Args:
            data: 输入数据（DataFrame或文本列表）
            text_columns: 文本列名列表
            enable_sentiment: 是否启用情感分析
            
        Returns:
            NLP分析结果
        """
        try:
            if isinstance(data, pd.DataFrame):
                # 如果是DataFrame，提取文本列
                if text_columns is None:
                    text_columns = data.select_dtypes(include=['object']).columns.tolist()
                
                text_data = []
                for col in text_columns:
                    if col in data.columns:
                        text_data.extend(data[col].dropna().astype(str).tolist())
            else:
                # 如果是文本列表
                text_data = data if isinstance(data, list) else [str(data)]
            
            if not text_data:
                return {"error": "没有找到可分析的文本数据"}
            
            result = {
                "success": True,
                "text_analysis": self.analyze_text_responses(text_data),
                "topic_modeling": self.topic_modeling(text_data, n_topics=min(5, len(text_data)))
            }
            
            # 只有在明确启用时才执行情感分析
            if enable_sentiment:
                result["sentiment_analysis"] = self.sentiment_analysis(text_data)
            
            self.logger.info(f"NLP分析完成，分析了 {len(text_data)} 条文本")
            return result
            
        except Exception as e:
            self.logger.error(f"NLP分析失败: {str(e)}")
            return {"error": f"NLP分析失败: {str(e)}"}
        
        # 中文停用词
        self.chinese_stopwords = {
            '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个',
            '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好',
            '自己', '这', '那', '他', '她', '它', '们', '这个', '那个', '什么', '怎么',
            '为什么', '因为', '所以', '但是', '然后', '还是', '或者', '如果', '虽然',
            '可以', '应该', '能够', '需要', '希望', '觉得', '认为', '知道', '想要'
        }
        
        # 英文停用词
        self.english_stopwords = {
            'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
            'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
            'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their',
            'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',
            'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
            'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
            'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
            'at', 'by', 'for', 'with', 'through', 'during', 'before', 'after', 'above',
            'below', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
            'further', 'then', 'once'
        }
    
    def preprocess_text(self, text: str) -> str:
        """
        Normalize raw text for tokenization.

        Lowercases the input, strips characters outside the kept alphabet
        (Chinese mode keeps CJK + Latin letters; English mode keeps Latin
        letters only) and collapses runs of whitespace.

        Args:
            text: raw text (non-strings yield "")

        Returns:
            Cleaned, single-spaced, stripped text.
        """
        if not isinstance(text, str):
            return ""

        lowered = text.lower()

        # Choose the character whitelist per language
        if self.language == 'chinese':
            keep_pattern = r'[^\u4e00-\u9fa5a-zA-Z\s]'
        else:
            keep_pattern = r'[^a-zA-Z\s]'
        cleaned = re.sub(keep_pattern, '', lowered)

        # Collapse whitespace runs and trim the ends
        return re.sub(r'\s+', ' ', cleaned).strip()
    
    def tokenize_text(self, text: str) -> List[str]:
        """
        Tokenize preprocessed text.

        Chinese text is segmented with jieba when available, otherwise it
        falls back to per-character tokens. Stopwords and very short tokens
        are dropped in both modes.

        Args:
            text: preprocessed text (see preprocess_text)

        Returns:
            List of tokens.
        """
        if self.language == 'chinese':
            # Defensive lookup keeps tokenization usable even if the stopword
            # attribute was never initialized on this instance.
            stopwords = getattr(self, 'chinese_stopwords', set())
            try:
                words = jieba.lcut(text)
                # Drop stopwords and single-character tokens
                return [w for w in words if len(w) > 1 and w not in stopwords]
            except Exception:
                # BUG FIX: was a bare `except:` (which also swallowed
                # SystemExit/KeyboardInterrupt). When jieba is unavailable,
                # fall back to per-character tokens; also skip whitespace,
                # which the old fallback wrongly emitted as tokens.
                return [ch for ch in text
                        if ch not in stopwords and not ch.isspace()]

        # English: whitespace split, drop stopwords and tokens of length <= 2
        stopwords = getattr(self, 'english_stopwords', set())
        return [w for w in text.split() if len(w) > 2 and w not in stopwords]
    
    def analyze_text_responses(self, text_data: List[str]) -> Dict[str, Any]:
        """
        Analyze open-ended text responses.

        Computes word frequencies, response-length statistics, keywords and
        topics over the cleaned corpus.

        Args:
            text_data: raw response strings.

        Returns:
            Summary statistics dict, or a dict with an 'error' key on failure.
        """
        try:
            # Clean each response and keep only the non-empty results
            cleaned = [t for t in (self.preprocess_text(x) for x in text_data) if t]
            if not cleaned:
                return {'error': '没有有效的文本数据'}

            # Flatten all responses into one token stream
            tokens: List[str] = []
            for doc in cleaned:
                tokens.extend(self.tokenize_text(doc))

            frequencies = Counter(tokens)
            lengths = [len(doc.split()) for doc in cleaned]

            # Downstream analyses over the cleaned corpus
            keywords = self.extract_keywords(cleaned)
            topics = self.topic_modeling(cleaned)

            return {
                'total_responses': len(text_data),
                'valid_responses': len(cleaned),
                'total_words': len(tokens),
                'unique_words': len(frequencies),
                'top_words': frequencies.most_common(20),
                'avg_text_length': np.mean(lengths),
                'text_length_stats': {
                    'min': min(lengths),
                    'max': max(lengths),
                    'std': np.std(lengths)
                },
                'keywords': keywords,
                'topics': topics
            }

        except Exception as e:
            self.logger.error(f"文本分析失败: {e}")
            return {'error': str(e)}
    
    def sentiment_analysis(self, text_data: List[str]) -> Dict[str, Any]:
        """
        Run TextBlob sentiment analysis over a list of texts.

        Each text is scored for polarity (-1 negative .. 1 positive) and
        subjectivity (0 objective .. 1 subjective), then labelled positive /
        negative / neutral with a +/-0.1 polarity threshold.

        Args:
            text_data: raw text strings; non-strings and blanks are skipped.

        Returns:
            Aggregate sentiment statistics, or a dict with an 'error' key.
        """
        if not NLP_DEPENDENCIES_AVAILABLE:
            return {
                'error': 'NLP依赖库不可用，无法进行情感分析',
                'fallback_message': '请安装textblob等依赖库以启用情感分析功能'
            }

        try:
            labels: List[str] = []
            polarity_values: List[float] = []
            subjectivity_values: List[float] = []

            for raw in text_data:
                if not isinstance(raw, str) or not raw.strip():
                    continue
                try:
                    scored = TextBlob(raw).sentiment
                    # Label by polarity with a small neutral band
                    if scored.polarity > 0.1:
                        label = 'positive'
                    elif scored.polarity < -0.1:
                        label = 'negative'
                    else:
                        label = 'neutral'
                    labels.append(label)
                    polarity_values.append(scored.polarity)
                    subjectivity_values.append(scored.subjectivity)
                except Exception as e:
                    # A single bad text must not abort the whole batch
                    self.logger.warning(f"单个文本情感分析失败: {e}")

            if not labels:
                return {'error': '没有有效的情感分析结果'}

            counts = Counter(labels)
            total = len(labels)
            return {
                'total_analyzed': total,
                'sentiment_distribution': dict(counts),
                'sentiment_percentages': {
                    name: cnt / total * 100 for name, cnt in counts.items()
                },
                'avg_polarity': np.mean(polarity_values),
                'avg_subjectivity': np.mean(subjectivity_values),
                'polarity_stats': {
                    'min': min(polarity_values),
                    'max': max(polarity_values),
                    'std': np.std(polarity_values)
                },
                'subjectivity_stats': {
                    'min': min(subjectivity_values),
                    'max': max(subjectivity_values),
                    'std': np.std(subjectivity_values)
                }
            }

        except Exception as e:
            self.logger.error(f"情感分析失败: {e}")
            return {'error': str(e)}
    
    def topic_modeling(self, text_data: List[str], n_topics: int = 5) -> Dict[str, Any]:
        """
        Fit an LDA topic model over the given texts.

        Texts are cleaned, vectorized with uni-/bigram counts (top 100
        features) and decomposed into n_topics topics; the 10 highest-weight
        words per topic are reported.

        Args:
            text_data: raw text strings
            n_topics: number of latent topics to fit

        Returns:
            Topic/word weights, model fit metrics and per-document topic
            distributions, or a dict with an 'error' key.
        """
        if not NLP_DEPENDENCIES_AVAILABLE:
            return {
                'error': 'NLP依赖库不可用，无法进行主题建模',
                'fallback_message': '请安装scikit-learn等依赖库以启用主题建模功能'
            }

        try:
            # Need at least one document per topic
            if len(text_data) < n_topics:
                return {'error': f'文本数量({len(text_data)})少于主题数量({n_topics})'}

            cleaned = [t for t in (self.preprocess_text(x) for x in text_data) if t]
            if len(cleaned) < n_topics:
                return {'error': '有效文本数量不足'}

            # Uni- and bigram count features, capped at 100 terms
            vectorizer = CountVectorizer(
                max_features=100,
                stop_words='english' if self.language == 'english' else None,
                ngram_range=(1, 2)
            )
            doc_term_matrix = vectorizer.fit_transform(cleaned)

            lda = LatentDirichletAllocation(
                n_components=n_topics,
                random_state=42,
                max_iter=10
            )
            lda.fit(doc_term_matrix)

            # Top-10 words per topic, highest weight first
            vocab = vectorizer.get_feature_names_out()
            topics = []
            for topic_idx, component in enumerate(lda.components_):
                ranked = component.argsort()[-10:][::-1]
                topics.append({
                    'topic_id': topic_idx,
                    'words': [vocab[i] for i in ranked],
                    'weights': [float(component[i]) for i in ranked]
                })

            return {
                'n_topics': n_topics,
                'topics': topics,
                'perplexity': lda.perplexity(doc_term_matrix),
                'log_likelihood': lda.score(doc_term_matrix),
                'doc_topic_distribution': lda.transform(doc_term_matrix).tolist()
            }

        except Exception as e:
            self.logger.error(f"主题建模失败: {e}")
            return {'error': str(e)}
    
    def extract_keywords(self, text_data: List[str], top_k: int = 10) -> List[Tuple[str, float]]:
        """
        Extract the top-k keywords with TF-IDF.

        For Chinese text jieba's built-in TF-IDF extractor is tried first;
        on any failure (or for English) a scikit-learn TfidfVectorizer over
        the combined corpus is used.

        Args:
            text_data: list of texts
            top_k: number of keywords to return

        Returns:
            List of (keyword, weight) pairs; empty list on failure.
        """
        try:
            # Treat the corpus as one document for global keyword weights
            combined_text = ' '.join(text_data)

            if self.language == 'chinese':
                try:
                    # jieba's built-in TF-IDF keyword extraction
                    keywords = jieba.analyse.extract_tags(
                        combined_text,
                        topK=top_k,
                        withWeight=True
                    )
                    return keywords
                except Exception:
                    # BUG FIX: was a bare `except:` (which also swallowed
                    # SystemExit/KeyboardInterrupt). Fall through to the
                    # sklearn TF-IDF path below.
                    pass

            # sklearn TF-IDF over uni- and bigrams
            vectorizer = TfidfVectorizer(
                max_features=top_k * 2,
                stop_words='english' if self.language == 'english' else None,
                ngram_range=(1, 2)
            )

            tfidf_matrix = vectorizer.fit_transform([combined_text])
            feature_names = vectorizer.get_feature_names_out()
            tfidf_scores = tfidf_matrix.toarray()[0]

            # Indices of the top_k scores, highest first
            top_indices = tfidf_scores.argsort()[-top_k:][::-1]
            return [(feature_names[i], float(tfidf_scores[i])) for i in top_indices]

        except Exception as e:
            self.logger.error(f"关键词提取失败: {e}")
            return []
    
    def generate_wordcloud_data(self, text_data: List[str]) -> Dict[str, Any]:
        """
        Build word-frequency data suitable for rendering a word cloud.

        Args:
            text_data: raw text strings.

        Returns:
            Word frequencies (rare words filtered out) plus corpus counts,
            or a dict with an 'error' key.
        """
        try:
            # Clean + tokenize everything into one token stream
            tokens: List[str] = []
            for raw in text_data:
                tokens.extend(self.tokenize_text(self.preprocess_text(raw)))

            if not tokens:
                return {'error': '没有有效的词汇数据'}

            frequencies = Counter(tokens)

            # Keep only words appearing in roughly >= 1% of all tokens
            threshold = max(1, len(tokens) // 100)
            kept = {word: count for word, count in frequencies.items()
                    if count >= threshold}

            return {
                'word_frequencies': kept,
                'total_words': len(tokens),
                'unique_words': len(frequencies),
                'filtered_words': len(kept)
            }

        except Exception as e:
            self.logger.error(f"词云数据生成失败: {e}")
            return {'error': str(e)}
    
    def text_similarity_analysis(self, text_data: List[str]) -> Dict[str, Any]:
        """
        Pairwise TF-IDF cosine similarity plus optional KMeans grouping.

        Args:
            text_data: raw text strings (at least 2 required).

        Returns:
            Similarity matrix and summary stats; when 3+ valid texts exist,
            also KMeans cluster assignments. On failure, a dict with an
            'error' key.
        """
        try:
            if len(text_data) < 2:
                return {'error': '需要至少2个文本进行相似性分析'}

            cleaned = [t for t in (self.preprocess_text(x) for x in text_data) if t]
            if len(cleaned) < 2:
                return {'error': '有效文本数量不足'}

            vectorizer = TfidfVectorizer(
                stop_words='english' if self.language == 'english' else None
            )
            tfidf_matrix = vectorizer.fit_transform(cleaned)

            # Cosine similarity between every pair of documents
            from sklearn.metrics.pairwise import cosine_similarity
            similarity_matrix = cosine_similarity(tfidf_matrix)

            # Cluster only when there are enough documents to group
            clusters = None
            cluster_info: Dict[Any, List[int]] = {}
            if len(cleaned) >= 3:
                n_clusters = min(5, len(cleaned) // 2)
                kmeans = KMeans(n_clusters=n_clusters, random_state=42)
                clusters = kmeans.fit_predict(tfidf_matrix)
                for doc_idx, label in enumerate(clusters):
                    cluster_info.setdefault(label, []).append(doc_idx)

            # Stats over the strict upper triangle (each pair counted once)
            upper = similarity_matrix[np.triu_indices_from(similarity_matrix, k=1)]
            return {
                'similarity_matrix': similarity_matrix.tolist(),
                'avg_similarity': np.mean(upper),
                'max_similarity': np.max(upper),
                'min_similarity': np.min(upper),
                'clusters': clusters.tolist() if clusters is not None else None,
                'cluster_info': cluster_info,
                'n_clusters': len(cluster_info) if cluster_info else 0
            }

        except Exception as e:
            self.logger.error(f"文本相似性分析失败: {e}")
            return {'error': str(e)}