"""
关键词分类模块
用于对清洗后的关键词进行分类和聚类分析
"""
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans, DBSCAN
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.metrics import classification_report, silhouette_score
import jieba
import jieba.analyse
from collections import Counter, defaultdict
import logging
from typing import List, Dict, Tuple, Set
import pickle
import json
import numpy as np


class KeywordClassifier:
    """Keyword classifier.

    Builds TF-IDF features for keywords, clusters them, and assigns
    categories via predefined rules plus a trained ML classifier.
    """

    def __init__(self, config: Dict):
        """Read vectorizer, clustering and classifier settings from *config*.

        Args:
            config: Full configuration dict; all options are read from its
                'classification' section, with defaults for missing keys.
        """
        self.config = config
        self.classification_config = config.get('classification', {})
        self.logger = logging.getLogger(__name__)

        # TF-IDF vectorizer. \w in the token pattern also matches CJK
        # characters, so jieba-segmented Chinese tokens are kept.
        tfidf_opts = self.classification_config.get('tfidf', {})
        self.tfidf_vectorizer = TfidfVectorizer(
            max_features=tfidf_opts.get('max_features', 5000),
            min_df=tfidf_opts.get('min_df', 2),
            max_df=tfidf_opts.get('max_df', 0.8),
            ngram_range=tuple(tfidf_opts.get('ngram_range', [1, 2])),
            token_pattern=r'(?u)\b\w+\b',
            analyzer='word'
        )

        # Clustering settings
        cluster_opts = self.classification_config.get('clustering', {})
        self.n_clusters = cluster_opts.get('n_clusters', 10)
        self.clustering_algorithm = cluster_opts.get('algorithm', 'kmeans')

        # Supervised classifier settings
        self.classifier_algorithm = (
            self.classification_config
            .get('classifier', {})
            .get('algorithm', 'random_forest')
        )

        # Predefined rule-based categories (category name -> seed keywords)
        self.predefined_categories = self._load_predefined_categories()
    def _load_predefined_categories(self) -> Dict[str, List[str]]:
        """Return the built-in category -> seed-keyword mapping.

        The catch-all category '其他' has no seeds; rule-based matching
        falls back to it when nothing else matches.

        Returns:
            Mapping from category name to its list of seed keywords.
        """
        seeds = (
            ('技术编程', [
                'python', 'java', 'javascript', '编程', '开发', '代码', '算法',
                '数据结构', '框架', '库', 'api', '数据库', 'sql', 'html', 'css',
            ]),
            ('数据科学', [
                '数据分析', '机器学习', '深度学习', '人工智能', '数据挖掘',
                '统计', '可视化', '大数据', '数据处理', '模型', '预测',
            ]),
            ('商业营销', [
                'seo', '营销', '推广', '广告', '品牌', '电商', '销售',
                '市场', '客户', '转化', '流量', '运营',
            ]),
            ('教育培训', [
                '学习', '教程', '课程', '培训', '教育', '考试', '证书',
                '技能', '知识', '学校', '老师', '学生',
            ]),
            ('生活服务', [
                '生活', '服务', '购物', '美食', '旅游', '健康', '医疗',
                '娱乐', '游戏', '音乐', '电影', '体育',
            ]),
            ('财经金融', [
                '投资', '理财', '股票', '基金', '保险', '贷款', '银行',
                '金融', '经济', '财务', '会计', '税务',
            ]),
            ('科技产品', [
                '手机', '电脑', '软件', '硬件', '电子', '数码', '科技',
                '互联网', '网络', '云计算', '物联网', '区块链',
            ]),
            ('其他', []),
        )
        return {name: list(words) for name, words in seeds}
    
    def preprocess_text(self, text: str) -> str:
        """Segment *text* with jieba and keep alphanumeric tokens longer than one character.

        Args:
            text: Raw text; non-string input yields the empty string.

        Returns:
            Space-joined filtered tokens.
        """
        if not isinstance(text, str):
            return ""

        # Precise-mode segmentation; strip whitespace, then drop one-character
        # tokens and anything containing non-alphanumeric characters.
        stripped = (token.strip() for token in jieba.cut(text, cut_all=False))
        kept = (token for token in stripped if len(token) > 1 and token.isalnum())
        return ' '.join(kept)
    
    def extract_tfidf_features(self, keywords: List[str]) -> Tuple[np.ndarray, List[str]]:
        """Fit the TF-IDF vectorizer on *keywords* and return features.

        NOTE: this (re)fits ``self.tfidf_vectorizer`` on every call, so the
        vocabulary always reflects the most recent keyword list.

        Args:
            keywords: Keyword strings to vectorize.

        Returns:
            Dense TF-IDF matrix and the corresponding feature names.
        """
        self.logger.info("提取TF-IDF特征...")

        corpus = [self.preprocess_text(kw) for kw in keywords]

        matrix = self.tfidf_vectorizer.fit_transform(corpus)
        names = self.tfidf_vectorizer.get_feature_names_out()

        self.logger.info(f"TF-IDF特征提取完成，特征维度: {matrix.shape}")

        # Densified copy: fine for keyword-sized corpora, memory-heavy for huge ones.
        return matrix.toarray(), names
    
    def perform_clustering(self, features: np.ndarray) -> Tuple[np.ndarray, Dict]:
        """Cluster the feature matrix with the configured algorithm.

        Args:
            features: Dense feature matrix (samples x features).

        Returns:
            Per-sample cluster labels and a summary dict (algorithm,
            cluster count, silhouette score, cluster sizes).

        Raises:
            ValueError: if ``self.clustering_algorithm`` is unsupported.
        """
        self.logger.info(f"开始{self.clustering_algorithm}聚类分析...")

        if self.clustering_algorithm == 'kmeans':
            clusterer = KMeans(
                n_clusters=self.n_clusters,
                random_state=42,
                n_init=10
            )
        elif self.clustering_algorithm == 'dbscan':
            clusterer = DBSCAN(eps=0.5, min_samples=5)
        else:
            raise ValueError(f"不支持的聚类算法: {self.clustering_algorithm}")

        cluster_labels = clusterer.fit_predict(features)

        # Silhouette score requires at least two distinct labels.
        if len(set(cluster_labels)) > 1:
            silhouette_avg = silhouette_score(features, cluster_labels)
        else:
            silhouette_avg = 0

        cluster_info = {
            'algorithm': self.clustering_algorithm,
            # DBSCAN marks noise points with label -1; noise is not a cluster,
            # so exclude it from the cluster count (KMeans never emits -1).
            'n_clusters': len(set(cluster_labels) - {-1}),
            'silhouette_score': silhouette_avg,
            'cluster_sizes': Counter(cluster_labels)
        }

        self.logger.info(f"聚类完成，聚类数量: {cluster_info['n_clusters']}, "
                        f"轮廓系数: {silhouette_avg:.4f}")

        return cluster_labels, cluster_info
    
    def analyze_clusters(self, keywords: List[str], cluster_labels: np.ndarray, 
                        features: np.ndarray, feature_names: List[str]) -> Dict:
        """Summarize each cluster: size, samples, top features, word frequencies.

        Args:
            keywords: Keyword strings, aligned with *cluster_labels*.
            cluster_labels: Per-keyword cluster label.
            features: Feature matrix aligned with *keywords*.
            feature_names: Column names of *features*.

        Returns:
            Mapping cluster id -> summary dict (DBSCAN noise, label -1, skipped).
        """
        self.logger.info("分析聚类结果...")

        analysis = {}

        for label in set(cluster_labels):
            # Label -1 is DBSCAN noise, not a real cluster.
            if label == -1:
                continue

            mask = cluster_labels == label
            members = [kw for kw, keep in zip(keywords, mask) if keep]

            # Cluster centroid in feature space; pick its 10 strongest features.
            centroid = features[mask].mean(axis=0)
            strongest = centroid.argsort()[-10:][::-1]
            top_features = [feature_names[i] for i in strongest]

            # Re-segment member keywords and count frequent words (> 1 char).
            tokens = [
                word
                for kw in members
                for word in jieba.cut(kw, cut_all=False)
                if len(word) > 1
            ]

            analysis[label] = {
                'size': len(members),
                'keywords': members[:20],  # 显示前20个关键词
                'top_features': top_features,
                'word_frequency': Counter(tokens).most_common(10),
                'sample_keywords': members[:5]
            }

        return analysis
    
    def rule_based_classification(self, keywords: List[str]) -> List[str]:
        """
        基于规则的分类
        
        Args:
            keywords: 关键词列表
        
        Returns:
            分类标签列表
        """
        self.logger.info("执行基于规则的分类...")
        
        classifications = []
        
        for keyword in keywords:
            keyword_lower = keyword.lower()
            classified = False
            
            # 检查每个预定义分类
            for category, category_keywords in self.predefined_categories.items():
                if category == '其他':
                    continue
                
                for cat_keyword in category_keywords:
                    if cat_keyword in keyword_lower:
                        classifications.append(category)
                        classified = True
                        break
                
                if classified:
                    break
            
            # 如果没有匹配到任何分类，归为"其他"
            if not classified:
                classifications.append('其他')
        
        return classifications
    
    def train_ml_classifier(self, keywords: List[str], labels: List[str]) -> Dict:
        """Train a supervised classifier on TF-IDF features of *keywords*.

        Args:
            keywords: Keyword strings (feature source; refits the shared vectorizer).
            labels: Training label per keyword.

        Returns:
            Dict with the fitted classifier, vectorizer, algorithm name,
            test-set accuracy, and the full classification report.

        Raises:
            ValueError: if ``self.classifier_algorithm`` is unsupported.
        """
        self.logger.info("训练机器学习分类器...")

        # NOTE: refits self.tfidf_vectorizer on *keywords*.
        features, _ = self.extract_tfidf_features(keywords)

        classifier_config = self.classification_config.get('classifier', {})
        test_size = classifier_config.get('test_size', 0.2)

        # A stratified split preserves class proportions, but sklearn raises
        # ValueError when any class has fewer than 2 samples (easy to hit with
        # sparse rule-based labels); fall back to a plain random split then.
        try:
            X_train, X_test, y_train, y_test = train_test_split(
                features, labels, test_size=test_size, random_state=42, stratify=labels
            )
        except ValueError:
            X_train, X_test, y_train, y_test = train_test_split(
                features, labels, test_size=test_size, random_state=42
            )

        if self.classifier_algorithm == 'random_forest':
            classifier = RandomForestClassifier(n_estimators=100, random_state=42)
        elif self.classifier_algorithm == 'naive_bayes':
            classifier = MultinomialNB()
        elif self.classifier_algorithm == 'svm':
            classifier = SVC(kernel='rbf', random_state=42)
        else:
            raise ValueError(f"不支持的分类算法: {self.classifier_algorithm}")

        classifier.fit(X_train, y_train)

        # Evaluate on the held-out split; zero_division=0 silences warnings
        # for classes absent from the predictions.
        y_pred = classifier.predict(X_test)
        report = classification_report(y_test, y_pred, output_dict=True, zero_division=0)

        model_info = {
            'classifier': classifier,
            'vectorizer': self.tfidf_vectorizer,
            'algorithm': self.classifier_algorithm,
            'accuracy': report['accuracy'],
            'classification_report': report
        }

        self.logger.info(f"分类器训练完成，准确率: {report['accuracy']:.4f}")

        return model_info
    
    def classify_keywords(self, df: pd.DataFrame) -> pd.DataFrame:
        """Run the full classification pipeline and return an enriched copy of *df*.

        Adds columns: cluster_id, rule_based_category, ml_category,
        classification_confidence, cluster_size. Side effects: writes the
        analysis JSON and the pickled model under data/results/.

        Args:
            df: DataFrame with a 'keyword' column of cleaned keywords.

        Returns:
            Copy of *df* with classification columns appended.
        """
        import os  # local import: only needed to ensure the output directory exists

        self.logger.info("开始关键词分类...")

        keywords = df['keyword'].tolist()

        # 1. TF-IDF features (fits the vectorizer on these keywords)
        features, feature_names = self.extract_tfidf_features(keywords)

        # 2. Unsupervised clustering
        cluster_labels, cluster_info = self.perform_clustering(features)

        # 3. Per-cluster keyword/feature analysis
        cluster_analysis = self.analyze_clusters(keywords, cluster_labels, features, feature_names)

        # 4. Rule-based categories (these also serve as ML training labels)
        rule_based_labels = self.rule_based_classification(keywords)

        # 5. Train the ML classifier on the rule-based labels
        ml_model_info = self.train_ml_classifier(keywords, rule_based_labels)

        # 6. Predict with the trained model. train_ml_classifier refits the
        # vectorizer on the same keyword list, so `features` from step 1
        # stays aligned with the model's feature space.
        ml_predictions = ml_model_info['classifier'].predict(features)

        # 7. Attach classification results
        result_df = df.copy()
        result_df['cluster_id'] = cluster_labels
        result_df['rule_based_category'] = rule_based_labels
        result_df['ml_category'] = ml_predictions

        # 8. Confidence = max class probability where supported; classifiers
        # without predict_proba (e.g. SVC without probability=True) get 1.0.
        if hasattr(ml_model_info['classifier'], 'predict_proba'):
            probabilities = ml_model_info['classifier'].predict_proba(features)
            result_df['classification_confidence'] = probabilities.max(axis=1)
        else:
            result_df['classification_confidence'] = 1.0

        # 9. Cluster size per row
        cluster_sizes = Counter(cluster_labels)
        result_df['cluster_size'] = result_df['cluster_id'].map(cluster_sizes)

        # 10. Collect analysis results for persistence
        analysis_results = {
            'cluster_info': cluster_info,
            'cluster_analysis': cluster_analysis,
            'model_info': {
                'algorithm': ml_model_info['algorithm'],
                'accuracy': ml_model_info['accuracy'],
                'feature_count': len(feature_names)
            },
            'category_distribution': Counter(rule_based_labels),
            'ml_category_distribution': Counter(ml_predictions)
        }

        def _to_builtin_scalar(value):
            # numpy scalars are not JSON serializable; unwrap to builtin types.
            if isinstance(value, (np.integer,)):
                return int(value)
            if isinstance(value, (np.floating,)):
                return float(value)
            return value

        def _make_json_serializable(obj):
            # Counter is a dict subclass, so it must be tested BEFORE dict;
            # checking dict first made the Counter branch unreachable.
            if isinstance(obj, Counter):
                return {str(_to_builtin_scalar(k)): _to_builtin_scalar(v) for k, v in obj.items()}
            if isinstance(obj, dict):
                return {str(_to_builtin_scalar(k)): _make_json_serializable(v) for k, v in obj.items()}
            if isinstance(obj, list):
                return [_make_json_serializable(v) for v in obj]
            if isinstance(obj, tuple):
                return tuple(_make_json_serializable(v) for v in obj)
            return _to_builtin_scalar(obj)

        # open() does not create directories; make sure the target exists.
        os.makedirs('data/results', exist_ok=True)

        with open('data/results/classification_analysis.json', 'w', encoding='utf-8') as f:
            json.dump(_make_json_serializable(analysis_results), f, ensure_ascii=False, indent=2)

        # SECURITY NOTE: pickle is fine for locally produced models, but never
        # load pickles from untrusted sources.
        with open('data/results/classification_model.pkl', 'wb') as f:
            pickle.dump(ml_model_info, f)

        self.logger.info("关键词分类完成")

        return result_df
    
    def generate_classification_report(self, df: pd.DataFrame) -> Dict:
        """
        生成分类报告
        
        Args:
            df: 分类结果DataFrame
        
        Returns:
            分类报告
        """
        report = {
            'total_keywords': len(df),
            'unique_clusters': df['cluster_id'].nunique(),
            'unique_rule_categories': df['rule_based_category'].nunique(),
            'unique_ml_categories': df['ml_category'].nunique(),
            'avg_confidence': df['classification_confidence'].mean(),
            'cluster_distribution': df['cluster_id'].value_counts().to_dict(),
            'rule_category_distribution': df['rule_based_category'].value_counts().to_dict(),
            'ml_category_distribution': df['ml_category'].value_counts().to_dict(),
            'top_confident_predictions': df.nlargest(10, 'classification_confidence')[
                ['keyword', 'ml_category', 'classification_confidence']
            ].to_dict('records')
        }
        
        return report


def main():
    """Smoke test: classify previously cleaned keywords and save the results."""
    from utils import load_config, setup_logging, save_data

    config = load_config()
    logger = setup_logging(config)
    classifier = KeywordClassifier(config)

    try:
        # Input produced by the data-cleaning step.
        df = pd.read_csv('data/cleaned/cleaned_keywords.csv')
        print(f"读取到 {len(df)} 个清洗后的关键词")

        classified_df = classifier.classify_keywords(df)

        # Print the scalar report entries (skip the verbose prediction list).
        report = classifier.generate_classification_report(classified_df)
        print("分类报告:")
        for key, value in report.items():
            if key != 'top_confident_predictions':
                print(f"  {key}: {value}")

        save_data(
            classified_df,
            'classified_keywords',
            config.get('output', {}).get('formats', ['csv']),
            config.get('output', {}).get('save_path', 'data/results')
        )

        print(f"分类完成，结果已保存")

    except FileNotFoundError:
        print("未找到清洗后的数据文件，请先运行数据清洗")


if __name__ == "__main__":
    main()
