"""
高速列车轴承智能故障诊断 - 可解释性分析模块

本模块包含：
1. 多种可解释性分析方法（SHAP、LIME、Grad-CAM等）
2. 迁移过程可解释性分析
3. 综合可解释性报告生成

作者：数学建模团队
版本：1.0
"""

import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from scipy.spatial.distance import jensenshannon, cosine
from sklearn.ensemble import RandomForestClassifier
from sklearn.manifold import TSNE
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.svm import LinearSVC
from tensorflow.keras.models import Model

warnings.filterwarnings('ignore')

# Configure matplotlib to render CJK labels (SimHei first, DejaVu as fallback).
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

# Optional explainability libraries: record availability flags so the
# analysis methods below can degrade gracefully when they are missing.
try:
    import shap
    SHAP_AVAILABLE = True
except ImportError:
    SHAP_AVAILABLE = False
    print("警告: SHAP库未安装，SHAP分析功能将不可用")

try:
    import lime
    import lime.lime_tabular
    LIME_AVAILABLE = True
except ImportError:
    LIME_AVAILABLE = False
    print("警告: LIME库未安装，LIME分析功能将不可用")


class ExplainableAIMethods:
    """Collection of explainability (XAI) methods for a trained classifier.

    Wraps SHAP, LIME, Grad-CAM and tree-based feature-importance analyses
    around one model.  Every method renders a matplotlib figure as a side
    effect and returns the raw values (or None on failure).
    """
    
    def __init__(self, model, feature_names, class_names):
        """
        Initialize the XAI helper.
        
        Args:
            model: model to explain (Keras-style for SHAP/Grad-CAM methods).
            feature_names (list): feature name list.
            class_names (list): class name list.
        """
        self.model = model
        self.feature_names = feature_names
        self.class_names = class_names
        
    def shap_analysis(self, X, sample_indices=None):
        """
        SHAP analysis: compute and plot SHAP values for selected samples.
        
        Args:
            X (np.array): input data.
            sample_indices (list): indices of the samples to explain;
                defaults to the first min(100, len(X)) samples.
            
        Returns:
            np.array: SHAP values, or None when SHAP is unavailable or fails.
        """
        if not SHAP_AVAILABLE:
            print("SHAP库未安装，无法进行SHAP分析")
            return None
            
        if sample_indices is None:
            sample_indices = range(min(100, len(X)))
        
        try:
            # Build the explainer.
            # NOTE(review): the full X is used as DeepExplainer background
            # data; a small subsample is the usual choice for speed -- confirm
            # this is intended for large X.
            explainer = shap.DeepExplainer(self.model, X)
            
            # Compute SHAP values for the selected samples only.
            shap_values = explainer.shap_values(X[sample_indices])
            
            # Visualization.
            # NOTE(review): shap.summary_plot normally creates its own figure,
            # so these subplot slots may not compose as intended -- verify.
            plt.figure(figsize=(15, 10))
            
            # Summary (beeswarm) plot.
            plt.subplot(2, 2, 1)
            shap.summary_plot(shap_values, X[sample_indices], 
                            feature_names=self.feature_names[:X.shape[1]])
            plt.title('SHAP摘要图')
            
            # Mean-|SHAP| feature importance as a bar chart.
            plt.subplot(2, 2, 2)
            shap.summary_plot(shap_values, X[sample_indices], plot_type="bar", 
                            feature_names=self.feature_names[:X.shape[1]])
            plt.title('特征重要性')
            
            # Force plot explaining the first selected sample.
            if len(sample_indices) > 0:
                plt.subplot(2, 2, 3)
                shap.force_plot(explainer.expected_value[0], shap_values[0][0], 
                              X[sample_indices][0], 
                              feature_names=self.feature_names[:X.shape[1]],
                              matplotlib=True)
                plt.title('样本0的力导向图')
            
            plt.tight_layout()
            plt.show()
            
            return shap_values
        except Exception as e:
            # Best-effort: report and continue so callers get None.
            print(f"SHAP分析出错: {str(e)}")
            return None
    
    def lime_analysis(self, X, sample_index=0):
        """
        LIME analysis: explain one tabular sample with a local surrogate.
        
        Args:
            X (np.array): input data (2-D, samples x features).
            sample_index (int): index of the sample to explain.
            
        Returns:
            LIME explanation object, or None when LIME is unavailable or fails.
        """
        if not LIME_AVAILABLE:
            print("LIME库未安装，无法进行LIME分析")
            return None
            
        try:
            # Build a tabular explainer over the whole dataset X.
            explainer = lime.lime_tabular.LimeTabularExplainer(
                X, feature_names=self.feature_names, 
                class_names=self.class_names, mode='classification'
            )
            
            # Explain a single sample with the 10 most influential features.
            # NOTE(review): LIME's classification mode expects a probability
            # function; confirm self.model.predict returns probabilities here.
            exp = explainer.explain_instance(X[sample_index], self.model.predict, num_features=10)
            
            # Visualization.
            plt.figure(figsize=(12, 8))
            exp.as_pyplot_figure()
            plt.title(f'样本 {sample_index} 的LIME解释')
            plt.tight_layout()
            plt.show()
            
            return exp
        except Exception as e:
            # Best-effort: report and continue so callers get None.
            print(f"LIME分析出错: {str(e)}")
            return None
    
    def grad_cam_analysis(self, X, sample_index=0, layer_name='conv1d_2'):
        """
        Grad-CAM analysis: highlight input regions driving the prediction.
        
        Args:
            X (np.array): input data.
            sample_index (int): index of the sample to explain.
            layer_name (str): name of the conv layer to inspect.
                NOTE(review): 'conv1d_2' is hard-coded to a specific
                architecture -- confirm it exists on self.model.
            
        Returns:
            np.array: normalized heatmap, or None on failure.
        """
        try:
            # Sub-model exposing both the conv-layer activations and the
            # final predictions so we can take gradients between them.
            grad_model = tf.keras.models.Model(
                [self.model.inputs], 
                [self.model.get_layer(layer_name).output, self.model.output]
            )
            
            with tf.GradientTape() as tape:
                conv_outputs, predictions = grad_model(X[sample_index:sample_index+1])
                # Loss = score of the predicted (argmax) class.
                loss = predictions[:, tf.argmax(predictions[0])]
            
            # Gradient of the class score w.r.t. the conv activations,
            # channel-pooled over batch and time.
            grads = tape.gradient(loss, conv_outputs)
            pooled_grads = tf.reduce_mean(grads, axis=(0, 1))
            
            # Weight activations by pooled gradients, ReLU, normalize to [0,1].
            # NOTE(review): division is undefined if the heatmap max is 0.
            conv_outputs = conv_outputs[0]
            heatmap = tf.reduce_mean(tf.multiply(conv_outputs, pooled_grads), axis=-1)
            heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
            
            # Visualization: raw signal next to the heatmap.
            plt.figure(figsize=(12, 6))
            plt.subplot(1, 2, 1)
            plt.plot(X[sample_index].flatten())
            plt.title('原始信号')
            
            plt.subplot(1, 2, 2)
            # NOTE(review): heatmap is 1-D at this point, so the [:, :, ...]
            # indexing likely raises (caught below) -- verify the intended rank.
            plt.imshow(heatmap[:, :, np.newaxis], cmap='hot', aspect='auto')
            plt.title('Grad-CAM热力图')
            plt.colorbar()
            
            plt.tight_layout()
            plt.show()
            
            return heatmap.numpy()
        except Exception as e:
            # Best-effort: report and continue so callers get None.
            print(f"Grad-CAM分析出错: {str(e)}")
            return None
    
    def feature_importance_analysis(self, X, y):
        """
        Feature importance via a random-forest proxy model.
        
        Args:
            X (np.array): feature matrix (samples x features).
            y (np.array): labels.
            
        Returns:
            np.array: impurity-based importance, one value per feature.
        """
        # Fit a random forest purely to extract its impurity importances.
        rf = RandomForestClassifier(n_estimators=100, random_state=42)
        rf.fit(X, y)
        
        importance = rf.feature_importances_
        indices = np.argsort(importance)[::-1]
        
        plt.figure(figsize=(12, 8))
        plt.bar(range(len(indices)), importance[indices])
        plt.xticks(range(len(indices)), [self.feature_names[i] for i in indices], rotation=90)
        plt.title('特征重要性排序')
        plt.xlabel('特征')
        plt.ylabel('重要性')
        plt.tight_layout()
        plt.show()
        
        return importance
    
    def permutation_importance_analysis(self, X, y, n_repeats=10):
        """
        Permutation importance (model-agnostic) via a random-forest proxy.
        
        Args:
            X (np.array): feature matrix (samples x features).
            y (np.array): labels.
            n_repeats (int): number of shuffles per feature.
            
        Returns:
            np.array: mean importance per feature over the repeats.
        """
        from sklearn.inspection import permutation_importance
        
        # Fit the proxy model on the full data.
        rf = RandomForestClassifier(n_estimators=100, random_state=42)
        rf.fit(X, y)
        
        # Score drop when each feature is shuffled, repeated n_repeats times.
        perm_importance = permutation_importance(rf, X, y, n_repeats=n_repeats, random_state=42)
        
        # Visualization: bars sorted by mean importance with std error bars.
        plt.figure(figsize=(12, 8))
        indices = np.argsort(perm_importance.importances_mean)[::-1]
        plt.bar(range(len(indices)), perm_importance.importances_mean[indices])
        plt.errorbar(range(len(indices)), perm_importance.importances_mean[indices],
                    yerr=perm_importance.importances_std[indices], fmt='none', color='red')
        plt.xticks(range(len(indices)), [self.feature_names[i] for i in indices], rotation=90)
        plt.title('排列重要性分析')
        plt.xlabel('特征')
        plt.ylabel('重要性')
        plt.tight_layout()
        plt.show()
        
        return perm_importance.importances_mean


class TransferProcessExplanation:
    """Explains a source-to-target transfer-learning process.

    Works on pre-extracted feature matrices: provides t-SNE alignment plots,
    per-feature distribution comparison (Jensen-Shannon distance) and
    quantitative transfer metrics.
    """

    def __init__(self, source_features, target_features, source_labels, target_predictions):
        """
        Initialize the transfer-process explainer.

        Args:
            source_features (np.array): source-domain feature matrix.
            target_features (np.array): target-domain feature matrix.
            source_labels (np.array): ground-truth source-domain labels.
            target_predictions (np.array): target-domain class probabilities
                (samples x classes).
        """
        self.source_features = source_features
        self.target_features = target_features
        self.source_labels = source_labels
        self.target_predictions = target_predictions

    def visualize_domain_alignment(self):
        """Project both domains with t-SNE and plot domain / class scatter."""
        # 2-D embedding of the stacked source+target features.
        combined_features = np.vstack([self.source_features, self.target_features])
        tsne = TSNE(n_components=2, random_state=42)
        embedded = tsne.fit_transform(combined_features)

        # Binary domain labels: 0 = source, 1 = target.
        source_domain = np.zeros(len(self.source_features))
        target_domain = np.ones(len(self.target_features))
        domain_labels = np.concatenate([source_domain, target_domain])

        plt.figure(figsize=(15, 6))

        # Left: colored by domain.
        plt.subplot(1, 2, 1)
        scatter = plt.scatter(embedded[:, 0], embedded[:, 1], c=domain_labels,
                            cmap='coolwarm', alpha=0.7)
        plt.colorbar(scatter, ticks=[0, 1])
        plt.title('域分布 (源域=0, 目标域=1)')

        # Right: colored by class (true labels for source, argmax
        # pseudo-labels for target).
        plt.subplot(1, 2, 2)
        combined_labels = np.concatenate([
            self.source_labels,
            np.argmax(self.target_predictions, axis=1)
        ])
        scatter = plt.scatter(embedded[:, 0], embedded[:, 1], c=combined_labels,
                            cmap='viridis', alpha=0.7)
        plt.colorbar(scatter)
        plt.title('类别分布')

        plt.tight_layout()
        plt.show()

    def _compute_distribution_differences(self, bins=50):
        """Per-feature Jensen-Shannon distance between the two domains.

        Both histograms are computed on a *shared* set of bin edges per
        feature (the original code used independent edges per domain, which
        makes the two histograms incomparable and the JS distance meaningless).

        Args:
            bins (int): number of histogram bins per feature.

        Returns:
            list[float]: JS distance for each feature column.
        """
        differences = []
        for i in range(self.source_features.shape[1]):
            lo = min(self.source_features[:, i].min(), self.target_features[:, i].min())
            hi = max(self.source_features[:, i].max(), self.target_features[:, i].max())
            if lo == hi:
                # Constant feature in both domains: widen the range so
                # np.histogram gets valid (strictly increasing) edges.
                hi = lo + 1.0
            edges = np.linspace(lo, hi, bins + 1)
            source_dist = np.histogram(self.source_features[:, i], bins=edges, density=True)[0]
            target_dist = np.histogram(self.target_features[:, i], bins=edges, density=True)[0]
            # jensenshannon normalizes its inputs to probability vectors.
            differences.append(jensenshannon(source_dist, target_dist))
        return differences

    def analyze_feature_distribution(self):
        """Plot and return the per-feature JS distance between domains."""
        distribution_differences = self._compute_distribution_differences()

        plt.figure(figsize=(12, 6))
        plt.bar(range(len(distribution_differences)), distribution_differences,
               color='lightcoral', alpha=0.7)
        plt.title('特征分布差异 (Jensen-Shannon距离)')
        plt.xlabel('特征索引')
        plt.ylabel('JS距离')
        plt.show()

        return distribution_differences

    def calculate_transfer_metrics(self):
        """Compute transfer-quality metrics.

        Returns:
            dict: source accuracy proxy, mean target confidence, domain
                alignment score and feature similarity.
        """
        # NOTE(review): 'source_accuracy' treats the argmax over feature
        # columns as a class prediction, which is only meaningful when the
        # features are class scores/logits -- confirm against the caller.
        # (accuracy_score is imported from sklearn.metrics at module level.)
        metrics = {
            'source_accuracy': accuracy_score(self.source_labels,
                                            np.argmax(self.source_features, axis=1)
                                            if self.source_features.ndim > 1 else self.source_labels),
            'target_confidence': np.mean(np.max(self.target_predictions, axis=1)),
            'domain_alignment': self._calculate_domain_alignment(),
            'feature_similarity': self._calculate_feature_similarity()
        }

        return metrics

    def _calculate_domain_alignment(self):
        """Domain alignment = 1 - domain-classifier accuracy.

        A linear SVM is cross-validated on the source-vs-target task; the
        harder the domains are to tell apart, the better the alignment.
        """
        X_domain = np.vstack([self.source_features, self.target_features])
        y_domain = np.concatenate([np.zeros(len(self.source_features)),
                                 np.ones(len(self.target_features))])

        clf = LinearSVC()
        scores = cross_val_score(clf, X_domain, y_domain, cv=5)

        # Lower domain-classification accuracy means better alignment.
        return 1 - np.mean(scores)

    def _calculate_feature_similarity(self):
        """Cosine similarity between the per-domain feature mean vectors."""
        source_mean = np.mean(self.source_features, axis=0)
        target_mean = np.mean(self.target_features, axis=0)

        # scipy's cosine() is a distance; convert to similarity.
        return 1 - cosine(source_mean, target_mean)

    def visualize_transfer_learning_process(self):
        """Four-panel overview: distributions, means, confidence, radar."""
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))

        # 1. Overall value distributions of the two domains.
        axes[0, 0].hist(self.source_features.flatten(), bins=50, alpha=0.7,
                       label='源域', color='blue')
        axes[0, 0].hist(self.target_features.flatten(), bins=50, alpha=0.7,
                       label='目标域', color='red')
        axes[0, 0].set_title('域分布对比')
        axes[0, 0].legend()

        # 2. Per-feature mean comparison.
        source_mean = np.mean(self.source_features, axis=0)
        target_mean = np.mean(self.target_features, axis=0)
        feature_indices = range(len(source_mean))

        axes[0, 1].plot(feature_indices, source_mean, label='源域均值', marker='o')
        axes[0, 1].plot(feature_indices, target_mean, label='目标域均值', marker='s')
        axes[0, 1].set_title('特征均值对比')
        axes[0, 1].legend()

        # 3. Target-domain prediction-confidence histogram.
        confidence = np.max(self.target_predictions, axis=1)
        axes[1, 0].hist(confidence, bins=30, alpha=0.7, color='green')
        axes[1, 0].set_title('目标域预测置信度分布')
        axes[1, 0].set_xlabel('置信度')
        axes[1, 0].set_ylabel('频次')

        # 4. Radar chart of the transfer metrics.
        metrics = self.calculate_transfer_metrics()
        metric_names = ['源域准确率', '目标域置信度', '域对齐度', '特征相似度']
        metric_values = [metrics['source_accuracy'], metrics['target_confidence'],
                        metrics['domain_alignment'], metrics['feature_similarity']]

        angles = np.linspace(0, 2 * np.pi, len(metric_names), endpoint=False)
        metric_values += metric_values[:1]  # close the polygon
        angles = np.concatenate((angles, [angles[0]]))

        axes[1, 1].plot(angles, metric_values, 'o-', linewidth=2)
        axes[1, 1].fill(angles, metric_values, alpha=0.25)
        axes[1, 1].set_xticks(angles[:-1])
        axes[1, 1].set_xticklabels(metric_names)
        axes[1, 1].set_title('迁移学习指标')
        axes[1, 1].grid(True)

        plt.tight_layout()
        plt.show()


class ComprehensiveExplanationReport:
    """Builds a combined global/local/model explanation report.

    Aggregates feature-importance (global), SHAP/LIME (local) and model
    metadata into one dict, and renders a summary figure.
    """

    def __init__(self, model, data, feature_names, class_names):
        """
        Initialize the report generator.

        Args:
            model: trained model to explain (must expose .predict).
            data (np.array): input samples used by the analyses.
            feature_names (list): one name per (flattened) input feature.
            class_names (list): one name per output class.
        """
        self.model = model
        self.data = data
        self.feature_names = feature_names
        self.class_names = class_names
        self.xai = ExplainableAIMethods(model, feature_names, class_names)

    def generate_comprehensive_report(self, sample_indices=None):
        """
        Generate the comprehensive explainability report.

        Args:
            sample_indices (list): samples to explain locally; defaults to
                the first min(10, len(data)) samples.

        Returns:
            dict: report with 'global_analysis', 'local_analysis',
                'model_analysis' and 'recommendations' sections.
        """
        if sample_indices is None:
            sample_indices = range(min(10, len(self.data)))

        report = {
            'global_analysis': self._global_analysis(),
            'local_analysis': self._local_analysis(sample_indices),
            'model_analysis': self._model_analysis(),
            'recommendations': self._generate_recommendations()
        }

        self._visualize_report(report)
        return report

    def _global_analysis(self):
        """Global view: random-forest importance over model pseudo-labels."""
        flat = self.data.reshape(len(self.data), -1)
        importance = self.xai.feature_importance_analysis(
            flat,
            np.argmax(self.model.predict(self.data), axis=1)
        )

        # Report the 10 highest-importance feature names (the original code
        # returned simply the first 10 names, which was not "top" at all).
        top_order = np.argsort(importance)[::-1][:10]
        return {
            'feature_importance': importance.tolist(),
            'top_features': [self.feature_names[i] for i in top_order]
        }

    def _local_analysis(self, sample_indices):
        """Per-sample explanations (SHAP + LIME + prediction summary)."""
        local_explanations = []
        flat = self.data.reshape(len(self.data), -1)

        for idx in sample_indices:
            # Predict once per sample (was two separate predict calls for
            # the class and the confidence).
            probs = self.model.predict(self.data[idx:idx+1])[0]

            shap_values = self.xai.shap_analysis(self.data[idx:idx+1])
            lime_exp = self.xai.lime_analysis(flat, sample_index=idx)

            local_explanations.append({
                'sample_index': idx,
                # Plain int/float so the report serializes cleanly (e.g. JSON).
                'prediction': int(np.argmax(probs)),
                'confidence': float(np.max(probs)),
                'shap_values': shap_values[0].tolist() if shap_values is not None else [],
                'lime_explanation': str(lime_exp) if lime_exp is not None else "LIME分析不可用"
            })

        return local_explanations

    def _model_analysis(self):
        """Model metadata: type, trainable parameter count, layer count."""
        trainable_params = np.sum([tf.keras.backend.count_params(w) for w in self.model.trainable_weights])

        return {
            'model_type': type(self.model).__name__,
            'trainable_parameters': int(trainable_params),
            'number_of_layers': len(self.model.layers)
        }

    def _generate_recommendations(self):
        """Static improvement / deployment recommendations for the report."""
        return {
            'model_improvement': [
                "考虑增加注意力机制以提高可解释性",
                "尝试不同的迁移学习策略",
                "优化特征选择过程"
            ],
            'deployment_suggestions': [
                "在生产环境中集成可解释性模块",
                "建立持续监控和反馈机制",
                "定期更新模型以适应数据分布变化"
            ]
        }

    def _visualize_report(self, report):
        """Render the four-panel summary figure for the report dict."""
        fig = plt.figure(figsize=(20, 15))

        # Top-10 feature importances.
        plt.subplot(2, 2, 1)
        importance = report['global_analysis']['feature_importance']
        indices = np.argsort(importance)[::-1][:10]
        plt.bar(range(10), [importance[i] for i in indices])
        plt.xticks(range(10), [self.feature_names[i] for i in indices], rotation=45)
        plt.title('前10个重要特征')

        # Confidence histogram over the locally-explained samples.
        plt.subplot(2, 2, 2)
        confidences = [exp['confidence'] for exp in report['local_analysis']]
        plt.hist(confidences, bins=20, alpha=0.7, color='skyblue')
        plt.title('预测置信度分布')
        plt.xlabel('置信度')
        plt.ylabel('频次')

        # Model metadata as text.
        plt.subplot(2, 2, 3)
        model_info = report['model_analysis']
        info_text = f"模型类型: {model_info['model_type']}\n" \
                   f"可训练参数: {model_info['trainable_parameters']:,}\n" \
                   f"层数: {model_info['number_of_layers']}"
        plt.text(0.1, 0.5, info_text, fontsize=12, va='center')
        plt.axis('off')
        plt.title('模型信息')

        # Recommendations as text.
        plt.subplot(2, 2, 4)
        recommendations = report['recommendations']
        rec_text = "模型改进建议:\n" + "\n".join(f"• {r}" for r in recommendations['model_improvement']) + \
                  "\n\n部署建议:\n" + "\n".join(f"• {r}" for r in recommendations['deployment_suggestions'])
        plt.text(0.1, 0.5, rec_text, fontsize=10, va='center')
        plt.axis('off')
        plt.title('建议')

        plt.tight_layout()
        plt.show()


class InterpretabilityPipeline:
    """End-to-end driver that chains the individual explainability analyses.

    Runs feature importance, SHAP, LIME and Grad-CAM in sequence and
    collects their raw outputs into a single results dict.
    """

    def __init__(self, model, feature_names, class_names):
        """
        Initialize the analysis pipeline.

        Args:
            model: model under analysis.
            feature_names (list): names of the input features.
            class_names (list): names of the output classes.
        """
        self.model = model
        self.feature_names = feature_names
        self.class_names = class_names
        self.xai = ExplainableAIMethods(model, feature_names, class_names)

    def run_comprehensive_analysis(self, X, y=None, sample_indices=None):
        """
        Run the full analysis suite on the given data.

        Args:
            X (np.array): input data.
            y (np.array): labels; enables the feature-importance step (optional).
            sample_indices (list): samples to explain locally; defaults to
                the first min(10, len(X)) samples (optional).

        Returns:
            dict: analysis outputs keyed by analysis name.
        """
        if sample_indices is None:
            sample_indices = range(min(10, len(X)))

        # Flattened (samples x features) view shared by the tabular analyses.
        flat = X.reshape(X.shape[0], -1)
        results = {}

        print("1. 进行特征重要性分析...")
        if y is not None:
            results['feature_importance'] = self.xai.feature_importance_analysis(flat, y)

        print("2. 进行SHAP分析...")
        results['shap_values'] = self.xai.shap_analysis(X, sample_indices)

        # Local explanations are limited to the first 3 samples to keep
        # the run time reasonable.
        print("3. 进行LIME分析...")
        results['lime_explanations'] = [
            self.xai.lime_analysis(flat, idx) for idx in sample_indices[:3]
        ]

        print("4. 进行Grad-CAM分析...")
        results['grad_cam_results'] = [
            self.xai.grad_cam_analysis(X, idx) for idx in sample_indices[:3]
        ]

        return results


def main():
    """主函数示例"""
    # 生成示例数据
    np.random.seed(42)
    X = np.random.randn(100, 30, 1)
    y = np.random.randint(0, 4, 100)
    
    # 创建简单的模型
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Flatten
    
    model = Sequential([
        Flatten(input_shape=(30, 1)),
        Dense(64, activation='relu'),
        Dense(32, activation='relu'),
        Dense(4, activation='softmax')
    ])
    
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    model.fit(X, y, epochs=10, verbose=0)
    
    # 特征名称和类别名称
    feature_names = [f'feature_{i}' for i in range(30)]
    class_names = ['正常', '外圈故障', '内圈故障', '滚动体故障']
    
    # 创建可解释性分析管道
    pipeline = InterpretabilityPipeline(model, feature_names, class_names)
    
    # 运行综合分析
    results = pipeline.run_comprehensive_analysis(X, y, sample_indices=range(5))
    
    # 生成综合报告
    report_generator = ComprehensiveExplanationReport(
        model=model,
        data=X,
        feature_names=feature_names,
        class_names=class_names
    )
    
    comprehensive_report = report_generator.generate_comprehensive_report(sample_indices=range(3))
    
    print("可解释性分析完成！")


if __name__ == "__main__":
    main()
