class ExplainableAIMethods:
    """Explainability toolkit for a trained classifier.

    Bundles four post-hoc explanation techniques: SHAP (DeepExplainer),
    LIME (tabular), Grad-CAM for a 1-D conv layer, and a random-forest
    surrogate for global feature importance.

    NOTE(review): relies on module-level ``plt`` (matplotlib.pyplot),
    ``np`` (numpy) and ``tf`` (tensorflow) being imported elsewhere in
    the file — confirm against the surrounding imports.

    Attributes:
        model: trained model. Must be a Keras-style model for
            ``shap_analysis``/``grad_cam_analysis`` and expose ``predict``
            for ``lime_analysis``.
        feature_names: feature labels aligned with the input columns.
        class_names: class labels (used by LIME).
    """

    def __init__(self, model, feature_names, class_names):
        self.model = model
        self.feature_names = feature_names
        self.class_names = class_names

    def shap_analysis(self, X, sample_indices=None):
        """Compute SHAP values and plot summary / importance / force plots.

        Args:
            X: input array shaped for the model (presumably
               ``(n_samples, n_steps, 1)`` given the callers — TODO confirm).
            sample_indices: indices of the samples to explain; defaults to
               the first ``min(100, len(X))`` samples.

        Returns:
            SHAP values as returned by ``shap.DeepExplainer.shap_values``
            (a list of arrays, one per output class).
        """
        import shap

        if sample_indices is None:
            sample_indices = list(range(min(100, len(X))))
        else:
            # Materialize so len()/emptiness checks and fancy indexing work
            # even when the caller passes a generator or range.
            sample_indices = list(sample_indices)

        # Build the explainer on the background data, then explain the
        # selected samples.
        explainer = shap.DeepExplainer(self.model, X)
        shap_values = explainer.shap_values(X[sample_indices])

        plt.figure(figsize=(15, 10))

        # show=False stops shap from creating/rendering its own figure,
        # which would otherwise defeat the subplot layout below.
        plt.subplot(2, 2, 1)
        shap.summary_plot(shap_values, X[sample_indices],
                          feature_names=self.feature_names, show=False)
        plt.title('SHAP Summary Plot')

        # Bar variant: mean |SHAP| per feature = global importance.
        plt.subplot(2, 2, 2)
        shap.summary_plot(shap_values, X[sample_indices], plot_type="bar",
                          feature_names=self.feature_names, show=False)
        plt.title('Feature Importance')

        # Per-sample explanation for the first selected sample.
        if sample_indices:
            plt.subplot(2, 2, 3)
            # expected_value may be a scalar (single-output model) or a
            # per-class sequence; normalize to the first class's base value.
            expected = explainer.expected_value
            base_value = expected[0] if np.ndim(expected) else expected
            shap.force_plot(base_value, shap_values[0][0],
                            X[sample_indices][0],
                            feature_names=self.feature_names,
                            matplotlib=True, show=False)
            plt.title('Force Plot for Sample 0')

        plt.tight_layout()
        plt.show()

        return shap_values

    def lime_analysis(self, X, sample_index=0):
        """Explain one sample with LIME (tabular mode).

        Args:
            X: 2-D training data used both to fit the LIME sampler and to
               pick the sample to explain.
            sample_index: row of ``X`` to explain.

        Returns:
            The ``lime`` Explanation object.
        """
        import lime
        import lime.lime_tabular

        # LIME perturbs around the sample and fits a local linear model;
        # it needs the training data to learn feature statistics.
        explainer = lime.lime_tabular.LimeTabularExplainer(
            X, feature_names=self.feature_names,
            class_names=self.class_names, mode='classification'
        )

        # NOTE(review): classification mode expects per-class probabilities
        # from the predict function — confirm self.model.predict returns
        # probabilities, not hard labels.
        exp = explainer.explain_instance(X[sample_index], self.model.predict,
                                         num_features=10)

        plt.figure(figsize=(12, 8))
        exp.as_pyplot_figure()
        plt.title(f'LIME Explanation for Sample {sample_index}')
        plt.tight_layout()
        plt.show()

        return exp

    def grad_cam_analysis(self, X, sample_index=0, layer_name='conv1d_2'):
        """Grad-CAM over a 1-D convolutional layer for one sample.

        Args:
            X: model input array; ``X[sample_index]`` is explained.
            sample_index: sample to explain.
            layer_name: name of the conv layer whose activations are used.

        Returns:
            1-D numpy heatmap (one weight per time step), normalized to [0, 1].
        """
        # Model that exposes both the conv layer's activations and the
        # final predictions, so one forward pass yields both.
        grad_model = tf.keras.models.Model(
            [self.model.inputs],
            [self.model.get_layer(layer_name).output, self.model.output]
        )

        with tf.GradientTape() as tape:
            conv_outputs, predictions = grad_model(X[sample_index:sample_index + 1])
            # Score of the top-predicted class drives the gradients.
            loss = predictions[:, tf.argmax(predictions[0])]

        # Channel weights = gradients averaged over batch and time axes.
        grads = tape.gradient(loss, conv_outputs)
        pooled_grads = tf.reduce_mean(grads, axis=(0, 1))

        # Weighted channel average -> 1-D map over time steps.
        conv_outputs = conv_outputs[0]
        heatmap = tf.reduce_mean(tf.multiply(conv_outputs, pooled_grads), axis=-1)
        # ReLU then normalize; divide_no_nan guards the all-zero case that
        # would otherwise produce NaNs.
        heatmap = tf.maximum(heatmap, 0)
        heatmap = tf.math.divide_no_nan(heatmap, tf.math.reduce_max(heatmap))
        heatmap = heatmap.numpy()

        plt.figure(figsize=(12, 6))
        plt.subplot(1, 2, 1)
        plt.plot(X[sample_index].flatten())
        plt.title('Original Signal')

        plt.subplot(1, 2, 2)
        # heatmap is 1-D, but imshow needs a 2-D image: render it as a
        # single row. (The previous `heatmap[:, :, np.newaxis]` raised
        # "too many indices" on a 1-D tensor.)
        plt.imshow(heatmap[np.newaxis, :], cmap='hot', aspect='auto')
        plt.title('Grad-CAM Heatmap')
        plt.colorbar()

        plt.tight_layout()
        plt.show()

        return heatmap

    def feature_importance_analysis(self, X, y):
        """Global feature importance via a random-forest surrogate.

        Note: these are the surrogate forest's impurity importances, not
        importances of ``self.model`` itself.

        Args:
            X: 2-D feature matrix.
            y: class labels.

        Returns:
            Array of per-feature importances (unsorted, aligned with X's columns).
        """
        from sklearn.ensemble import RandomForestClassifier

        # Fit a fresh forest purely to extract feature importances.
        rf = RandomForestClassifier(n_estimators=100, random_state=42)
        rf.fit(X, y)

        importance = rf.feature_importances_
        indices = np.argsort(importance)[::-1]  # descending rank

        plt.figure(figsize=(12, 8))
        plt.bar(range(len(indices)), importance[indices])
        plt.xticks(range(len(indices)),
                   [self.feature_names[i] for i in indices], rotation=90)
        plt.title('Feature Importance Ranking')
        plt.tight_layout()
        plt.show()

        return importance

# Explainability analysis example
# Set up the label and feature metadata for the explainer.
class_names = ['Normal', 'Outer Race', 'Inner Race', 'Ball']
feature_names = X.columns.tolist()[:30]  # keep the first 30 features

xai = ExplainableAIMethods(model, feature_names, class_names)

# SHAP on the first 100 training samples, reshaped to (n, steps, 1)
shap_values = xai.shap_analysis(X_train.reshape(-1, X_train.shape[1], 1)[:100])

# LIME on sample 0 of the first 100 training rows
lime_exp = xai.lime_analysis(X_train[:100], sample_index=0)

# Surrogate random-forest feature importance over the full training set
feature_importance = xai.feature_importance_analysis(X_train, y_train)
