class ComprehensiveExplanationReport:
    """Builds a combined global/local/model explainability report and renders it.

    Wraps an `ExplainableAIMethods` helper (project-local) to compute feature
    importance, SHAP and LIME explanations for a fitted Keras-style model.
    """

    def __init__(self, model, data, feature_names, class_names):
        # NOTE(review): `data` is assumed to be a 3-D array shaped
        # (samples, features, 1) given the reshape calls below — confirm with callers.
        self.model = model
        self.data = data
        self.feature_names = feature_names
        self.class_names = class_names
        self.xai = ExplainableAIMethods(model, feature_names, class_names)

    def generate_comprehensive_report(self, sample_indices=None):
        """Generate the comprehensive interpretability report.

        Args:
            sample_indices: iterable of sample indices to explain locally;
                defaults to the first min(10, len(data)) samples.

        Returns:
            dict with 'global_analysis', 'local_analysis', 'model_analysis'
            and 'recommendations' sections. Also draws a summary figure as a
            side effect.
        """
        if sample_indices is None:
            sample_indices = range(min(10, len(self.data)))

        report = {
            'global_analysis': self._global_analysis(),
            'local_analysis': self._local_analysis(sample_indices),
            'model_analysis': self._model_analysis(),
            'recommendations': self._generate_recommendations()
        }

        self._visualize_report(report)
        return report

    def _global_analysis(self):
        """Global analysis: feature importance against the model's own predictions."""
        importance = self.xai.feature_importance_analysis(
            self.data.reshape(len(self.data), -1),
            np.argmax(self.model.predict(self.data), axis=1)
        )

        # BUGFIX: 'top_features' used to be simply the first 10 feature names,
        # regardless of importance. Rank names by descending importance instead.
        top_order = np.argsort(importance)[::-1][:10]
        return {
            'feature_importance': importance.tolist(),
            'top_features': [self.feature_names[i] for i in top_order]
        }

    def _local_analysis(self, sample_indices):
        """Local (per-sample) analysis: SHAP values, LIME explanation, prediction."""
        local_explanations = []

        for idx in sample_indices:
            # SHAP analysis on the single sample (keeps the batch dimension)
            shap_values = self.xai.shap_analysis(self.data[idx:idx+1])

            # LIME analysis over the flattened feature matrix
            lime_exp = self.xai.lime_analysis(
                self.data.reshape(len(self.data), -1),
                sample_index=idx
            )

            # BUGFIX: was two identical model.predict() calls per sample
            # (one for the class, one for the confidence) — predict once.
            probs = self.model.predict(self.data[idx:idx+1])[0]

            local_explanations.append({
                'sample_index': idx,
                'prediction': np.argmax(probs),
                'confidence': np.max(probs),
                'shap_values': shap_values[0].tolist() if shap_values is not None else [],
                'lime_explanation': str(lime_exp)
            })

        return local_explanations

    def _model_analysis(self):
        """Model-level analysis: type name, trainable parameter count, depth."""
        trainable_params = np.sum([tf.keras.backend.count_params(w) for w in self.model.trainable_weights])

        return {
            'model_type': type(self.model).__name__,
            'trainable_parameters': int(trainable_params),
            'number_of_layers': len(self.model.layers)
        }

    def _generate_recommendations(self):
        """Return fixed improvement/deployment recommendations (static text)."""
        return {
            'model_improvement': [
                "考虑增加注意力机制以提高可解释性",
                "尝试不同的迁移学习策略",
                "优化特征选择过程"
            ],
            'deployment_suggestions': [
                "在生产环境中集成可解释性模块",
                "建立持续监控和反馈机制",
                "定期更新模型以适应数据分布变化"
            ]
        }

    def _visualize_report(self, report):
        """Render the report as a 2x2 matplotlib figure and show it."""
        plt.figure(figsize=(20, 15))

        # Feature importance — top-k bar chart.
        # BUGFIX: was hard-coded to range(10), which crashed with < 10 features;
        # size the axes from the actual number of ranked indices.
        plt.subplot(2, 2, 1)
        importance = report['global_analysis']['feature_importance']
        indices = np.argsort(importance)[::-1][:10]
        k = len(indices)
        plt.bar(range(k), [importance[i] for i in indices])
        plt.xticks(range(k), [self.feature_names[i] for i in indices], rotation=45)
        plt.title('Top 10 Important Features')

        # Confidence distribution across the locally-explained samples
        plt.subplot(2, 2, 2)
        confidences = [exp['confidence'] for exp in report['local_analysis']]
        plt.hist(confidences, bins=20, alpha=0.7)
        plt.title('Prediction Confidence Distribution')

        # Model information rendered as text on a blank axis
        plt.subplot(2, 2, 3)
        model_info = report['model_analysis']
        info_text = f"Model Type: {model_info['model_type']}\n" \
                   f"Trainable Parameters: {model_info['trainable_parameters']:,}\n" \
                   f"Number of Layers: {model_info['number_of_layers']}"
        plt.text(0.1, 0.5, info_text, fontsize=12, va='center')
        plt.axis('off')
        plt.title('Model Information')

        # Recommendations rendered as a bulleted text panel
        plt.subplot(2, 2, 4)
        recommendations = report['recommendations']
        rec_text = "Model Improvements:\n" + "\n".join(f"• {r}" for r in recommendations['model_improvement']) + \
                  "\n\nDeployment Suggestions:\n" + "\n".join(f"• {r}" for r in recommendations['deployment_suggestions'])
        plt.text(0.1, 0.5, rec_text, fontsize=10, va='center')
        plt.axis('off')
        plt.title('Recommendations')

        plt.tight_layout()
        plt.show()

# Generate the comprehensive report.
# NOTE(review): `model`, `X_train`, `feature_names`, `class_names` are assumed
# to be defined earlier in this file/notebook — confirm before running standalone.
report_generator = ComprehensiveExplanationReport(
    model=model,
    # Reshape to (samples, features, 1) and explain only the first 100 samples
    # to keep SHAP/LIME runtime manageable.
    data=X_train.reshape(-1, X_train.shape[1], 1)[:100],
    feature_names=feature_names,
    class_names=class_names
)

# Run global + local analysis; local explanations for the first 5 samples only.
comprehensive_report = report_generator.generate_comprehensive_report(sample_indices=range(5))
