#!/usr/bin/env python3
"""
Reuters news multi-label topic classification with Auto-sklearn
===============================================================

A complete multi-label classification example showing how to use
Auto-sklearn to automatically detect and assign multiple topic
labels to news articles.

Features:
- Fully automated multi-label model search
- Text data handling and feature engineering
- Multi-label-specific evaluation metrics
- Label co-occurrence analysis and visualization
"""

import os
import sys
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets, model_selection, metrics
from sklearn.utils.multiclass import type_of_target
from sklearn.preprocessing import MultiLabelBinarizer
import autosklearn.classification
from datetime import datetime
import joblib
from collections import Counter
import itertools

# Silence noisy library warnings and configure matplotlib for CJK rendering.
warnings.filterwarnings('ignore')
plt.style.use('seaborn-v0_8')
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font so Chinese labels render
plt.rcParams['axes.unicode_minus'] = False    # keep minus signs visible with CJK fonts

class ReutersMultiLabelClassifier:
    """路透社新闻多标签分类器"""
    
    def __init__(self, time_limit=300, per_run_limit=60):
        """
        初始化分类器
        
        Args:
            time_limit: Auto-sklearn总运行时间（秒）
            per_run_limit: 单个模型最大运行时间（秒）
        """
        self.time_limit = time_limit
        self.per_run_limit = per_run_limit
        self.automl = None
        self.label_names = None
        self.results = {}
        
    def load_and_prepare_data(self):
        """加载和预处理路透社新闻数据集"""
        print("正在加载路透社新闻数据集...")
        
        try:
            # 加载数据 - 使用OpenML的路透社数据集
            X, y = datasets.fetch_openml(
                data_id=40594, 
                return_X_y=True, 
                as_frame=False,
                parser='auto'
            )
            
            print(f"数据加载成功！")
            print(f"特征矩阵形状: {X.shape}")
            print(f"标签矩阵形状: {y.shape}")
            
        except Exception as e:
            print(f"OpenML数据加载失败: {e}")
            print("使用模拟多标签数据...")
            # 创建模拟多标签数据
            X, y = self._create_simulated_multilabel_data()
        
        # 处理标签格式
        y = self._process_labels(y)
        
        # 验证数据格式
        target_type = type_of_target(y)
        print(f"目标类型: {target_type}")
        
        if target_type != 'multilabel-indicator':
            print("⚠️  数据不是标准多标签格式，正在转换...")
            y = self._convert_to_multilabel_format(y)
        
        # 数据集基本信息
        n_samples, n_features = X.shape
        n_labels = y.shape[1] if len(y.shape) > 1 else 1
        
        print(f"\\n📊 数据集概览:")
        print(f"- 样本数量: {n_samples:,}")
        print(f"- 特征数量: {n_features:,}")
        print(f"- 标签数量: {n_labels}")
        print(f"- 平均每样本标签数: {y.sum(axis=1).mean():.2f}")
        print(f"- 数据稀疏度: {(X == 0).sum() / X.size * 100:.1f}%")
        
        # 训练集和测试集分割
        X_train, X_test, y_train, y_test = model_selection.train_test_split(
            X, y, test_size=0.25, random_state=42, stratify=None
        )
        
        print(f"\\n📝 数据分割:")
        print(f"- 训练集: {X_train.shape[0]} 样本")
        print(f"- 测试集: {X_test.shape[0]} 样本")
        
        # 保存标签信息
        self.label_names = [f'Label_{i}' for i in range(y.shape[1])]
        
        return X_train, X_test, y_train, y_test
    
    def _create_simulated_multilabel_data(self):
        """创建模拟多标签数据（当真实数据不可用时）"""
        print("创建模拟路透社新闻多标签数据...")
        
        from sklearn.datasets import make_multilabel_classification
        
        X, y = make_multilabel_classification(
            n_samples=2000,
            n_features=1000,
            n_classes=10,
            n_labels=3,
            length=50,
            allow_unlabeled=False,
            sparse=True,
            return_indicator='dense',
            random_state=42
        )
        
        # 转换为稀疏矩阵以模拟文本特征
        from scipy.sparse import csr_matrix
        X = csr_matrix(X).toarray()
        
        print("✅ 模拟数据创建完成")
        return X, y
    
    def _process_labels(self, y):
        """处理标签格式"""
        if isinstance(y, (list, np.ndarray)) and len(y.shape) == 1:
            # 如果是字符串数组，需要转换
            if y.dtype == 'object':
                print("处理字符串标签格式...")
                # 将TRUE/FALSE转换为1/0
                if isinstance(y[0], str):
                    y = np.where(y == 'TRUE', 1, 0)
                    return y.reshape(-1, 1)
        
        # 如果已经是正确格式，直接返回
        return y
    
    def _convert_to_multilabel_format(self, y):
        """转换为多标签指示器格式"""
        if len(y.shape) == 1:
            # 单标签转多标签
            from sklearn.preprocessing import LabelBinarizer
            lb = LabelBinarizer()
            return lb.fit_transform(y)
        return y
    
    def analyze_label_distribution(self, y_train, y_test):
        """分析标签分布"""
        print("\\n📊 标签分布分析")
        print("=" * 50)
        
        # 标签频率统计
        train_label_counts = y_train.sum(axis=0)
        test_label_counts = y_test.sum(axis=0)
        
        print("标签频率 (训练集 / 测试集):")
        for i, (train_count, test_count) in enumerate(zip(train_label_counts, test_label_counts)):
            label_name = self.label_names[i] if self.label_names else f"Label_{i}"
            print(f"- {label_name}: {train_count} / {test_count}")
        
        # 标签组合分析
        print(f"\\n📋 标签组合分析:")
        train_label_combinations = y_train.sum(axis=1)
        unique_combinations, counts = np.unique(train_label_combinations, return_counts=True)
        
        print("每样本标签数量分布:")
        for combo, count in zip(unique_combinations, counts):
            print(f"- {int(combo)} 个标签: {count} 样本 ({count/len(y_train)*100:.1f}%)")
        
        return train_label_counts, test_label_counts
    
    def create_multilabel_automl(self):
        """创建多标签Auto-sklearn分类器"""
        print("\\n正在创建Auto-sklearn多标签分类器...")
        
        # 配置AutoML分类器
        self.automl = autosklearn.classification.AutoSklearnClassifier(
            time_left_for_this_task=self.time_limit,
            per_run_time_limit=self.per_run_limit,
            initial_configurations_via_metalearning=10,
            smac_scenario_args={'runcount_limit': 20},
            resampling_strategy='cv',
            resampling_strategy_arguments={'folds': 3},
            tmp_folder="/tmp/autosklearn_multilabel_reuters",
            seed=42,
            memory_limit=4096,
            include={
                'classifier': [
                    'random_forest', 'extra_trees', 'gradient_boosting',
                    'k_nearest_neighbors', 'libsvm_svc', 'sgd'
                ]
            },
            # 多标签设置
            metric=autosklearn.metrics.f1_macro
        )
        
        return self.automl
    
    def train_model(self, X_train, y_train):
        """训练Auto-sklearn多标签模型"""
        print(f"\\n开始训练多标签模型 (时间限制: {self.time_limit}秒)...")
        print("=" * 60)
        
        start_time = datetime.now()
        
        # 训练模型
        self.automl.fit(
            X_train, y_train,
            dataset_name="reuters_multilabel"
        )
        
        end_time = datetime.now()
        training_time = (end_time - start_time).total_seconds()
        
        print(f"\\n✅ 模型训练完成，耗时: {training_time:.1f}秒")
        print("=" * 60)
        
        return training_time
    
    def calculate_multilabel_metrics(self, y_true, y_pred, y_pred_proba=None):
        """计算多标签专用评估指标"""
        metrics_dict = {}
        
        # 基于样本的指标
        metrics_dict['exact_match_ratio'] = metrics.accuracy_score(y_true, y_pred)
        metrics_dict['hamming_loss'] = metrics.hamming_loss(y_true, y_pred)
        
        # 基于标签的指标
        metrics_dict['micro_f1'] = metrics.f1_score(y_true, y_pred, average='micro')
        metrics_dict['macro_f1'] = metrics.f1_score(y_true, y_pred, average='macro')
        metrics_dict['weighted_f1'] = metrics.f1_score(y_true, y_pred, average='weighted')
        
        # 精度和召回率
        metrics_dict['micro_precision'] = metrics.precision_score(y_true, y_pred, average='micro')
        metrics_dict['macro_precision'] = metrics.precision_score(y_true, y_pred, average='macro')
        metrics_dict['micro_recall'] = metrics.recall_score(y_true, y_pred, average='micro')
        metrics_dict['macro_recall'] = metrics.recall_score(y_true, y_pred, average='macro')
        
        # 每个标签的性能
        label_f1_scores = metrics.f1_score(y_true, y_pred, average=None)
        metrics_dict['per_label_f1'] = label_f1_scores
        
        # 排序相关指标（如果有概率预测）
        if y_pred_proba is not None:
            try:
                metrics_dict['coverage_error'] = metrics.coverage_error(y_true, y_pred_proba)
                metrics_dict['label_ranking_loss'] = metrics.label_ranking_loss(y_true, y_pred_proba)
                metrics_dict['label_ranking_avg_precision'] = metrics.label_ranking_average_precision_score(y_true, y_pred_proba)
            except:
                print("⚠️  部分排序指标计算失败")
        
        return metrics_dict
    
    def evaluate_model(self, X_train, X_test, y_train, y_test):
        """全面评估多标签模型性能"""
        print("\\n正在评估多标签模型性能...")
        
        # 生成预测
        train_pred = self.automl.predict(X_train)
        test_pred = self.automl.predict(X_test)
        
        # 尝试获取概率预测
        try:
            train_pred_proba = self.automl.predict_proba(X_train)
            test_pred_proba = self.automl.predict_proba(X_test)
        except:
            print("⚠️  无法获取概率预测")
            train_pred_proba = None
            test_pred_proba = None
        
        # 计算评估指标
        train_metrics = self.calculate_multilabel_metrics(y_train, train_pred, train_pred_proba)
        test_metrics = self.calculate_multilabel_metrics(y_test, test_pred, test_pred_proba)
        
        # 保存结果
        self.results.update({
            'train_predictions': train_pred,
            'test_predictions': test_pred,
            'train_pred_proba': train_pred_proba,
            'test_pred_proba': test_pred_proba,
            'train_metrics': train_metrics,
            'test_metrics': test_metrics,
            'y_train': y_train,
            'y_test': y_test
        })
        
        # 显示评估结果
        self._display_evaluation_results(train_metrics, test_metrics)
        
        return train_metrics, test_metrics
    
    def _display_evaluation_results(self, train_metrics, test_metrics):
        """显示评估结果"""
        print("\\n📊 多标签模型性能评估")
        print("=" * 70)
        print(f"{'指标':<25} {'训练集':<15} {'测试集':<15} {'说明':<20}")
        print("-" * 70)
        
        metrics_info = [
            ('exact_match_ratio', '完全匹配率', '越高越好'),
            ('hamming_loss', '汉明损失', '越低越好'),
            ('micro_f1', 'Micro F1', '整体性能'),
            ('macro_f1', 'Macro F1', '平衡性能'),
            ('micro_precision', 'Micro 精度', '整体精度'),
            ('macro_precision', 'Macro 精度', '平衡精度'),
            ('micro_recall', 'Micro 召回', '整体召回'),
            ('macro_recall', 'Macro 召回', '平衡召回')
        ]
        
        for metric_key, metric_name, description in metrics_info:
            train_val = train_metrics.get(metric_key, 0)
            test_val = test_metrics.get(metric_key, 0)
            print(f"{metric_name:<25} {train_val:<15.4f} {test_val:<15.4f} {description:<20}")
        
        # 性能诊断
        self._diagnose_multilabel_performance(test_metrics)
    
    def _diagnose_multilabel_performance(self, metrics):
        """诊断多标签模型性能"""
        print("\\n🔍 多标签模型性能诊断")
        print("=" * 50)
        
        exact_match = metrics['exact_match_ratio']
        micro_f1 = metrics['micro_f1']
        macro_f1 = metrics['macro_f1']
        hamming_loss = metrics['hamming_loss']
        
        # 整体性能评估
        if exact_match >= 0.5:
            print("✅ 优秀：完全匹配率高，模型预测非常准确")
        elif exact_match >= 0.3:
            print("✅ 良好：完全匹配率可接受")
        elif exact_match >= 0.1:
            print("⚠️  一般：完全匹配率偏低，需要优化")
        else:
            print("❌ 差：完全匹配率很低，模型需要重新设计")
        
        # F1分数评估
        if micro_f1 >= 0.8 and macro_f1 >= 0.6:
            print("✅ F1性能：微平均和宏平均F1都表现良好")
        elif micro_f1 >= 0.7:
            print("✅ F1性能：微平均F1良好，但标签平衡性需改进")
        else:
            print("⚠️  F1性能：需要提升模型性能")
        
        # 汉明损失评估
        if hamming_loss <= 0.1:
            print("✅ 标签预测：汉明损失低，单标签预测准确")
        elif hamming_loss <= 0.2:
            print("⚠️  标签预测：汉明损失中等，有改进空间")
        else:
            print("❌ 标签预测：汉明损失高，单标签预测不准确")
    
    def analyze_model_insights(self):
        """分析模型洞察"""
        print("\\n🔬 多标签模型分析和洞察")
        print("=" * 60)
        
        # 显示最佳模型
        print("🏆 最优模型排行榜:")
        try:
            print(self.automl.leaderboard())
        except Exception as e:
            print(f"无法显示排行榜: {e}")
        
        print("\\n🔧 最终集成模型构成:")
        try:
            print(self.automl.show_models())
        except Exception as e:
            print(f"无法显示模型详情: {e}")
        
        # 每标签性能分析
        if 'per_label_f1' in self.results['test_metrics']:
            print("\\n📊 各标签F1性能:")
            per_label_f1 = self.results['test_metrics']['per_label_f1']
            for i, f1_score in enumerate(per_label_f1):
                label_name = self.label_names[i] if self.label_names else f"Label_{i}"
                print(f"- {label_name}: {f1_score:.3f}")
    
    def create_multilabel_visualizations(self):
        """创建多标签可视化图表"""
        print("\\n正在生成多标签可视化图表...")
        
        y_test = self.results['y_test']
        test_pred = self.results['test_predictions']
        
        # 创建综合分析图表
        fig, axes = plt.subplots(2, 3, figsize=(18, 12))
        fig.suptitle('路透社新闻多标签分类分析', fontsize=16, fontweight='bold')
        
        # 1. 标签频率分布
        ax1 = axes[0, 0]
        label_frequencies = y_test.sum(axis=0)
        label_names_short = [f'L{i}' for i in range(len(label_frequencies))]
        bars = ax1.bar(label_names_short, label_frequencies, color='skyblue', alpha=0.7)
        ax1.set_xlabel('标签')
        ax1.set_ylabel('频次')
        ax1.set_title('测试集标签频率分布')
        ax1.tick_params(axis='x', rotation=45)
        
        # 添加数值标签
        for bar in bars:
            height = bar.get_height()
            ax1.text(bar.get_x() + bar.get_width()/2., height,
                    f'{int(height)}', ha='center', va='bottom')
        
        # 2. 每样本标签数量分布
        ax2 = axes[0, 1]
        labels_per_sample_true = y_test.sum(axis=1)
        labels_per_sample_pred = test_pred.sum(axis=1)
        
        bins = np.arange(0, max(labels_per_sample_true.max(), labels_per_sample_pred.max()) + 2) - 0.5
        ax2.hist(labels_per_sample_true, bins=bins, alpha=0.7, label='真实', color='blue')
        ax2.hist(labels_per_sample_pred, bins=bins, alpha=0.7, label='预测', color='red')
        ax2.set_xlabel('每样本标签数量')
        ax2.set_ylabel('样本数量')
        ax2.set_title('标签数量分布对比')
        ax2.legend()
        ax2.grid(True, alpha=0.3)
        
        # 3. 性能指标雷达图
        ax3 = axes[0, 2]
        metrics = self.results['test_metrics']
        metric_names = ['Exact Match', 'Micro F1', 'Macro F1', 'Micro Precision', 'Macro Precision']
        metric_values = [
            metrics['exact_match_ratio'],
            metrics['micro_f1'],
            metrics['macro_f1'],
            metrics['micro_precision'],
            metrics['macro_precision']
        ]
        
        # 简化雷达图为条形图
        bars = ax3.barh(metric_names, metric_values, color='green', alpha=0.7)
        ax3.set_xlabel('分数')
        ax3.set_title('模型性能指标')
        ax3.set_xlim(0, 1)
        
        # 添加数值标签
        for i, bar in enumerate(bars):
            width = bar.get_width()
            ax3.text(width + 0.01, bar.get_y() + bar.get_height()/2,
                    f'{metric_values[i]:.3f}', ha='left', va='center')
        
        # 4. 标签共现热图
        ax4 = axes[1, 0]
        if y_test.shape[1] <= 20:  # 只有在标签数量不太多时才显示
            cooccurrence = np.dot(y_test.T, y_test)
            np.fill_diagonal(cooccurrence, 0)  # 移除对角线
            
            im = ax4.imshow(cooccurrence, cmap='Blues', aspect='auto')
            ax4.set_xlabel('标签')
            ax4.set_ylabel('标签')
            ax4.set_title('标签共现矩阵')
            
            # 添加colorbar
            plt.colorbar(im, ax=ax4, shrink=0.8)
        else:
            ax4.text(0.5, 0.5, '标签数量过多\\n无法显示共现矩阵', 
                    ha='center', va='center', transform=ax4.transAxes)
            ax4.set_title('标签共现矩阵')
        
        # 5. 汉明损失分布
        ax5 = axes[1, 1]
        hamming_losses = []
        for i in range(len(y_test)):
            hamming_loss = np.sum(y_test[i] != test_pred[i]) / len(y_test[i])
            hamming_losses.append(hamming_loss)
        
        ax5.hist(hamming_losses, bins=20, alpha=0.7, color='orange', edgecolor='black')
        ax5.set_xlabel('样本汉明损失')
        ax5.set_ylabel('样本数量')
        ax5.set_title('样本级汉明损失分布')
        ax5.axvline(x=np.mean(hamming_losses), color='red', linestyle='--', 
                   label=f'平均值: {np.mean(hamming_losses):.3f}')
        ax5.legend()
        ax5.grid(True, alpha=0.3)
        
        # 6. 各标签F1分数
        ax6 = axes[1, 2]
        if 'per_label_f1' in metrics:
            per_label_f1 = metrics['per_label_f1']
            label_indices = range(len(per_label_f1))
            bars = ax6.bar(label_indices, per_label_f1, color='purple', alpha=0.7)
            ax6.set_xlabel('标签索引')
            ax6.set_ylabel('F1分数')
            ax6.set_title('各标签F1性能')
            ax6.grid(True, alpha=0.3)
            
            # 添加平均线
            avg_f1 = np.mean(per_label_f1)
            ax6.axhline(y=avg_f1, color='red', linestyle='--', 
                       label=f'平均F1: {avg_f1:.3f}')
            ax6.legend()
        
        plt.tight_layout()
        
        # 保存图表
        os.makedirs('../results', exist_ok=True)
        plt.savefig('../results/multilabel_analysis.png', dpi=300, bbox_inches='tight')
        print("✅ 图表已保存至 ../results/multilabel_analysis.png")
        
        return fig
    
    def save_model_and_results(self):
        """保存模型和结果"""
        print("\\n正在保存模型和结果...")
        
        os.makedirs('../results', exist_ok=True)
        
        # 保存Auto-sklearn模型
        model_path = '../results/reuters_multilabel_model.pkl'
        joblib.dump(self.automl, model_path)
        
        # 保存评估结果
        results_path = '../results/multilabel_results.pkl'
        joblib.dump(self.results, results_path)
        
        print(f"✅ 模型已保存至: {model_path}")
        print(f"✅ 结果已保存至: {results_path}")
    
    def generate_multilabel_report(self):
        """生成多标签分类应用报告"""
        print("\\n📋 多标签分类应用报告")
        print("=" * 60)
        
        test_metrics = self.results['test_metrics']
        
        print("🎯 多标签分类性能摘要:")
        print(f"- 完全匹配准确率: {test_metrics['exact_match_ratio']:.1%}")
        print(f"- 微平均F1分数: {test_metrics['micro_f1']:.3f}")
        print(f"- 宏平均F1分数: {test_metrics['macro_f1']:.3f}")
        print(f"- 汉明损失: {test_metrics['hamming_loss']:.3f}")
        
        print("\\n💡 新闻分类系统应用建议:")
        if test_metrics['exact_match_ratio'] >= 0.3:
            print("✅ 推荐用于自动新闻标签系统")
            print("✅ 可用于内容推荐和分类")
            print("✅ 适合大规模新闻处理")
        else:
            print("⚠️  建议结合人工审核使用")
            print("⚠️  需要进一步优化模型性能")
        
        print("\\n🔄 持续改进建议:")
        print("- 收集更多标注数据")
        print("- 优化文本预处理和特征工程")
        print("- 尝试深度学习方法(BERT等)")
        print("- 考虑标签层次结构")
        print("- 建立用户反馈机制")

def main():
    """Run the end-to-end multi-label classification pipeline.

    Raises:
        Any exception from the pipeline is printed with a traceback and
        re-raised so the process exits non-zero on failure.
    """
    print("📰 路透社新闻多标签主题分类系统")
    print("=" * 60)

    # Build the classifier with a 5-minute search budget.
    classifier = ReutersMultiLabelClassifier(time_limit=300, per_run_limit=60)

    try:
        # 1. Load and prepare the data.
        X_train, X_test, y_train, y_test = classifier.load_and_prepare_data()

        # 2. Analyze the label distribution.
        classifier.analyze_label_distribution(y_train, y_test)

        # 3. Build and train the AutoML model.
        classifier.create_multilabel_automl()
        training_time = classifier.train_model(X_train, y_train)

        # 4. Evaluate on both splits.
        train_metrics, test_metrics = classifier.evaluate_model(X_train, X_test, y_train, y_test)

        # 5. Model insights (leaderboard, ensemble, per-label F1).
        classifier.analyze_model_insights()

        # 6. Visualizations.
        classifier.create_multilabel_visualizations()

        # 7. Persist model and results.
        classifier.save_model_and_results()

        # 8. Application report.
        classifier.generate_multilabel_report()

        print(f"\n🎉 多标签分类分析完成！总耗时: {training_time:.1f}秒")
        print("📂 结果文件保存在 ../results/ 目录中")

    except Exception as e:
        print(f"❌ 运行出错: {str(e)}")
        import traceback
        traceback.print_exc()
        raise

if __name__ == "__main__":
    main()