"""
任务四：简化的可解释性分析模块

基于task1、task2、task3的更改，提供简化的可解释性分析功能

作者：数学建模团队
版本：1.0 (简化版本)
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import TSNE
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
from scipy.spatial.distance import jensenshannon, cosine
import warnings
warnings.filterwarnings('ignore')

# Use the non-interactive Agg backend: all plots are saved to files, never shown.
plt.switch_backend('Agg')

# Font setup so CJK axis labels/titles render; SimHei first, with fallbacks.
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans', 'Arial Unicode MS']
plt.rcParams['axes.unicode_minus'] = False  # avoid missing-glyph boxes for minus signs
plt.rcParams['font.size'] = 10

class SimplifiedInterpretabilityAnalyzer:
    """Simplified interpretability analyzer for transfer-diagnosis models.

    Wraps a PyTorch model and offers post-hoc analyses: surrogate feature
    importance, model structure summary, per-fault feature statistics,
    source/target domain distances, t-SNE projections and gradient-based
    attributions.  If the model exposes ``feature_extractor`` (and
    optionally ``classifier``) sub-modules they are used directly;
    otherwise raw inputs / full-model outputs are used as fallbacks.
    """

    def __init__(self, model, device='cpu'):
        """
        Args:
            model: PyTorch ``nn.Module`` to analyze.
            device: device string input tensors are moved to.
        """
        self.model = model
        self.device = device
        # All analyses are read-only; keep the model in eval mode so
        # dropout / batch-norm behave deterministically.
        self.model.eval()

    def analyze_feature_importance(self, X, y):
        """Rank input features with a Random Forest surrogate model.

        Args:
            X: (n_samples, n_features) feature matrix.
            y: (n_samples,) integer class labels.

        Returns:
            dict with ``importance_scores`` (per-feature scores),
            ``sorted_indices`` (descending by importance) and
            ``top_features`` (the 15 most important feature indices).
        """
        print("📊 分析特征重要性...")

        # An auxiliary Random Forest is trained purely to obtain
        # impurity-based importances; self.model is not involved here.
        rf = RandomForestClassifier(n_estimators=100, random_state=42)
        rf.fit(X, y)

        feature_importance = rf.feature_importances_
        indices = np.argsort(feature_importance)[::-1]  # descending order

        return {
            'importance_scores': feature_importance,
            'sorted_indices': indices,
            'top_features': indices[:15]  # top-15 important features
        }

    def analyze_model_structure(self):
        """Summarize parameter counts and leaf-layer composition.

        Returns:
            dict with ``total_parameters``, ``trainable_parameters`` and a
            ``layers`` list of {name, type, parameters} per leaf module.
        """
        print("🏗️ 分析模型结构...")

        structure_info = {
            'total_parameters': sum(p.numel() for p in self.model.parameters()),
            'trainable_parameters': sum(p.numel() for p in self.model.parameters() if p.requires_grad),
            'layers': []
        }

        # Only leaf modules (no children) are listed, so containers such as
        # nn.Sequential are not double counted.
        for name, module in self.model.named_modules():
            if len(list(module.children())) == 0:
                structure_info['layers'].append({
                    'name': name,
                    'type': type(module).__name__,
                    'parameters': sum(p.numel() for p in module.parameters())
                })

        return structure_info

    def analyze_fault_mechanism(self, X, y, label_encoder):
        """Compute per-class feature statistics.

        Args:
            X: (n_samples, n_features) feature matrix.
            y: (n_samples,) encoded labels matching ``label_encoder``.
            label_encoder: fitted encoder exposing ``classes_``.

        Returns:
            dict mapping class name -> {mean, std, count}; classes with
            zero samples are omitted.
        """
        print("⚙️ 分析故障机理...")

        fault_stats = {}
        for i, class_name in enumerate(label_encoder.classes_):
            class_mask = y == i
            if np.sum(class_mask) > 0:
                class_data = X[class_mask]
                fault_stats[class_name] = {
                    'mean': np.mean(class_data, axis=0),
                    'std': np.std(class_data, axis=0),
                    'count': np.sum(class_mask)
                }

        return fault_stats

    def analyze_domain_adaptation(self, X_source, X_target):
        """Measure the distance between source- and target-domain features.

        Returns:
            dict with the extracted features, their per-dimension means and
            the cosine distance / Jensen-Shannon divergence between means.
        """
        print("🌐 分析域适应过程...")

        with torch.no_grad():
            X_source_tensor = torch.FloatTensor(X_source).to(self.device)
            X_target_tensor = torch.FloatTensor(X_target).to(self.device)

            if hasattr(self.model, 'feature_extractor'):
                # Keep numpy arrays instead of nested Python lists (the
                # original's .tolist() forced a wasteful round-trip before
                # the numpy ops below).
                source_features = self.model.feature_extractor(X_source_tensor).cpu().numpy()
                target_features = self.model.feature_extractor(X_target_tensor).cpu().numpy()
            else:
                # No dedicated extractor: fall back to the raw inputs.
                source_features = np.asarray(X_source)
                target_features = np.asarray(X_target)

        source_mean = np.mean(source_features, axis=0)
        target_mean = np.mean(target_features, axis=0)

        cosine_dist = cosine(source_mean, target_mean)
        # NOTE(review): jensenshannon expects non-negative probability-like
        # vectors; raw feature means can be negative, in which case this
        # yields nan — confirm the feature range before trusting it.
        js_divergence = jensenshannon(source_mean, target_mean)

        return {
            'source_features': source_features,
            'target_features': target_features,
            'cosine_distance': cosine_dist,
            'js_divergence': js_divergence,
            'source_mean': source_mean,
            'target_mean': target_mean
        }

    def analyze_feature_alignment(self, X_source, X_target):
        """Project source/target features to 2-D with t-SNE.

        Returns:
            dict with ``source_2d``, ``target_2d`` and the stacked
            ``combined_2d`` embedding.
        """
        print("🎯 分析特征对齐...")

        combined_features = np.vstack([X_source, X_target])

        # t-SNE requires perplexity < n_samples; clamp for small samples.
        perplexity = min(30, len(combined_features) - 1)
        tsne = TSNE(n_components=2, random_state=42, perplexity=perplexity)
        features_2d = tsne.fit_transform(combined_features)

        n_source = len(X_source)
        return {
            'source_2d': features_2d[:n_source],
            'target_2d': features_2d[n_source:],
            'combined_2d': features_2d
        }

    def analyze_gradient_reversal(self, X_source, X_target):
        """Probe input gradients through the feature extractor.

        A scalar probe loss (mean activation) is back-propagated to the
        inputs of each domain and the gradients/norms are reported.
        """
        print("🔄 分析梯度反转层...")

        X_source_tensor = torch.FloatTensor(X_source).to(self.device)
        X_target_tensor = torch.FloatTensor(X_target).to(self.device)
        X_source_tensor.requires_grad_(True)
        X_target_tensor.requires_grad_(True)

        if hasattr(self.model, 'feature_extractor'):
            source_features = self.model.feature_extractor(X_source_tensor)
            target_features = self.model.feature_extractor(X_target_tensor)
        else:
            source_features = X_source_tensor
            target_features = X_target_tensor

        # Scalar probe losses so d(loss)/d(input) is well defined.
        source_loss = torch.mean(source_features)
        target_loss = torch.mean(target_features)

        # The two losses live on independent graphs, so no retain_graph is
        # needed (the original passed it defensively).
        source_loss.backward()
        target_loss.backward()

        # numpy arrays (not .tolist() nested lists): np.linalg.norm below
        # consumes them directly.
        source_grad = X_source_tensor.grad.cpu().numpy() if X_source_tensor.grad is not None else None
        target_grad = X_target_tensor.grad.cpu().numpy() if X_target_tensor.grad is not None else None

        return {
            'source_gradients': source_grad,
            'target_gradients': target_grad,
            'gradient_norm_source': np.linalg.norm(source_grad) if source_grad is not None else 0,
            'gradient_norm_target': np.linalg.norm(target_grad) if target_grad is not None else 0
        }

    def analyze_gradient_importance(self, X, y):
        """Attribute the classification loss to input features via gradients.

        Returns:
            dict with the raw input ``gradients``, the per-feature
            ``importance`` (mean |grad|) and ``sorted_indices`` (descending).
        """
        print("📈 分析梯度重要性...")

        X_tensor = torch.FloatTensor(X).to(self.device)
        X_tensor.requires_grad_(True)

        if hasattr(self.model, 'classifier') and hasattr(self.model, 'feature_extractor'):
            features = self.model.feature_extractor(X_tensor)
            output = self.model.classifier(features)
        else:
            # assumes the model returns (class_output, domain_output,
            # features) — TODO confirm against the actual model API
            output = self.model(X_tensor)[0]

        y_tensor = torch.LongTensor(y).to(self.device)
        loss = F.cross_entropy(output, y_tensor)

        # Clear stale parameter gradients first so repeated analysis calls
        # do not accumulate gradients inside the model.
        self.model.zero_grad()
        loss.backward()

        gradients = X_tensor.grad.cpu().numpy()
        gradient_importance = np.mean(np.abs(gradients), axis=0)

        return {
            'gradients': gradients,
            'importance': gradient_importance,
            'sorted_indices': np.argsort(gradient_importance)[::-1]
        }

    def analyze_decision_boundary(self, X_source, y_source, X_target, y_target):
        """Embed both domains with t-SNE for decision-boundary plots.

        Returns:
            dict with per-domain 2-D embeddings, the original labels and
            the combined embedding/labels.
        """
        print("🎯 分析决策边界...")

        combined_X = np.vstack([X_source, X_target])
        combined_y = np.concatenate([y_source, y_target])

        # t-SNE requires perplexity < n_samples; clamp for small samples.
        perplexity = min(30, len(combined_X) - 1)
        tsne = TSNE(n_components=2, random_state=42, perplexity=perplexity)
        X_2d = tsne.fit_transform(combined_X)

        n_source = len(X_source)
        return {
            'source_2d': X_2d[:n_source],
            'target_2d': X_2d[n_source:],
            'source_labels': y_source,
            'target_labels': y_target,
            'combined_2d': X_2d,
            'combined_labels': combined_y
        }

    def analyze_layer_activations(self, X_source, X_target):
        """Collect feature-extractor activations for both domains.

        Returns:
            dict with the per-domain activations and their per-dimension
            mean activation vectors.
        """
        print("🧠 分析层激活...")

        with torch.no_grad():
            X_source_tensor = torch.FloatTensor(X_source).to(self.device)
            X_target_tensor = torch.FloatTensor(X_target).to(self.device)

            if hasattr(self.model, 'feature_extractor'):
                # numpy instead of .tolist(): the means below need arrays.
                source_activations = self.model.feature_extractor(X_source_tensor).cpu().numpy()
                target_activations = self.model.feature_extractor(X_target_tensor).cpu().numpy()
            else:
                source_activations = np.asarray(X_source)
                target_activations = np.asarray(X_target)

        return {
            'source_activations': source_activations,
            'target_activations': target_activations,
            'source_mean_activation': np.mean(source_activations, axis=0),
            'target_mean_activation': np.mean(target_activations, axis=0)
        }

class SimplifiedInterpretabilityVisualizer:
    """Simplified interpretability visualizer.

    Renders analyzer outputs to PNG files via matplotlib (the module
    forces the Agg backend, so no display is needed).
    """

    def __init__(self):
        # Base palette; indices are wrapped modulo its length (see _color)
        # so more than four classes no longer raise IndexError.
        self.colors = ['#2E8B57', '#DC143C', '#FF8C00', '#4169E1']

    def _color(self, i):
        """Return a palette color for index ``i``, cycling when needed."""
        return self.colors[i % len(self.colors)]

    def plot_feature_importance(self, analysis_result, save_path):
        """Bar chart of the most important features.

        Args:
            analysis_result: output of ``analyze_feature_importance``.
            save_path: PNG output path.
        """
        importance_scores = analysis_result['importance_scores']
        top_features = analysis_result['top_features']

        plt.figure(figsize=(12, 8))

        # Plot the top-15 important features.
        top_importance = importance_scores[top_features]
        feature_names = [f'Feature_{i+1}' for i in top_features]

        bars = plt.bar(range(len(top_importance)), top_importance,
                       color=self._color(0), alpha=0.8)
        plt.xlabel('特征编号', fontsize=12)
        plt.ylabel('重要性得分', fontsize=12)
        plt.title('特征重要性分析', fontsize=14, fontweight='bold')
        plt.xticks(range(len(feature_names)), feature_names, rotation=45)
        plt.grid(True, alpha=0.3)

        # Annotate each bar with its numeric score.
        for bar, score in zip(bars, top_importance):
            height = bar.get_height()
            plt.text(bar.get_x() + bar.get_width()/2., height + 0.001,
                    f'{score:.3f}', ha='center', va='bottom', fontsize=9)

        plt.tight_layout()
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"✅ 特征重要性图已保存: {save_path}")

    def plot_domain_adaptation(self, analysis_result, save_path):
        """t-SNE scatter of source vs. target feature distributions.

        Args:
            analysis_result: output of ``analyze_domain_adaptation``.
            save_path: PNG output path.
        """
        source_features = analysis_result['source_features']
        target_features = analysis_result['target_features']

        # Embed both domains jointly with t-SNE (perplexity clamped for
        # small sample counts — t-SNE requires perplexity < n_samples).
        combined_features = np.vstack([source_features, target_features])
        perplexity = min(30, len(combined_features) - 1)
        tsne = TSNE(n_components=2, random_state=42, perplexity=perplexity)
        features_2d = tsne.fit_transform(combined_features)

        n_source = len(source_features)
        source_2d = features_2d[:n_source]
        target_2d = features_2d[n_source:]

        plt.figure(figsize=(12, 8))

        # Source vs. target scatter, one color each.
        plt.scatter(source_2d[:, 0], source_2d[:, 1], c=self._color(0),
                   label='源域', alpha=0.7, s=50)
        plt.scatter(target_2d[:, 0], target_2d[:, 1], c=self._color(1),
                   label='目标域', alpha=0.7, s=50)

        plt.xlabel('t-SNE 维度 1', fontsize=12)
        plt.ylabel('t-SNE 维度 2', fontsize=12)
        plt.title('域适应特征分布分析', fontsize=14, fontweight='bold')
        plt.legend(fontsize=12)
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"✅ 域适应分析图已保存: {save_path}")

    def plot_fault_characteristics(self, analysis_result, label_encoder, save_path):
        """2x2 grid of per-fault mean feature profiles.

        Args:
            analysis_result: dict from ``analyze_fault_mechanism``
                (class name -> {mean, std, count}).
            label_encoder: kept for interface symmetry (class names come
                from the dict keys).
            save_path: PNG output path.
        """
        fault_stats = analysis_result

        fig, axes = plt.subplots(2, 2, figsize=(16, 12))
        axes = axes.flatten()

        for i, (fault_type, stats) in enumerate(fault_stats.items()):
            # Only 4 subplots are available; extra classes are skipped.
            if i >= 4:
                break

            ax = axes[i]

            # Bar plot of the class's mean feature values.
            mean_values = stats['mean']
            feature_indices = range(len(mean_values))

            bars = ax.bar(feature_indices, mean_values, color=self._color(i), alpha=0.7)
            ax.set_title(f'{fault_type} 故障特征', fontsize=12, fontweight='bold')
            ax.set_xlabel('特征编号', fontsize=10)
            ax.set_ylabel('特征值', fontsize=10)
            ax.grid(True, alpha=0.3)

            # Sample-count annotation in the corner of each subplot.
            ax.text(0.02, 0.98, f'样本数: {stats["count"]}',
                   transform=ax.transAxes, fontsize=10,
                   verticalalignment='top', bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))

        plt.tight_layout()
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"✅ 故障特征分析图已保存: {save_path}")

    def plot_decision_boundary(self, analysis_result, label_encoder, save_path):
        """Class-colored t-SNE scatter of both domains.

        Args:
            analysis_result: output of ``analyze_decision_boundary``.
            label_encoder: fitted encoder exposing ``classes_``.
            save_path: PNG output path.
        """
        source_2d = analysis_result['source_2d']
        target_2d = analysis_result['target_2d']
        source_labels = analysis_result['source_labels']
        target_labels = analysis_result['target_labels']

        plt.figure(figsize=(12, 8))

        # Source domain: circles, one (cycled) color per class.  The
        # original indexed self.colors[i] directly and raised IndexError
        # for more than four classes.
        for i, class_name in enumerate(label_encoder.classes_):
            mask = source_labels == i
            if np.sum(mask) > 0:
                plt.scatter(source_2d[mask, 0], source_2d[mask, 1],
                           c=self._color(i), label=f'源域-{class_name}',
                           alpha=0.7, s=50, marker='o')

        # Target domain: same colors, triangle markers.
        for i, class_name in enumerate(label_encoder.classes_):
            mask = target_labels == i
            if np.sum(mask) > 0:
                plt.scatter(target_2d[mask, 0], target_2d[mask, 1],
                           c=self._color(i), label=f'目标域-{class_name}',
                           alpha=0.7, s=50, marker='^')

        plt.xlabel('t-SNE 维度 1', fontsize=12)
        plt.ylabel('t-SNE 维度 2', fontsize=12)
        plt.title('决策边界分析', fontsize=14, fontweight='bold')
        plt.legend(fontsize=10)
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"✅ 决策边界分析图已保存: {save_path}")

def generate_interpretability_report(analysis_results, label_encoder, timestamp):
    """Write a skeleton Markdown interpretability report to disk.

    Args:
        analysis_results: dict of analysis outputs (not yet rendered in
            detail; kept for interface compatibility / future expansion).
        label_encoder: fitted label encoder (not yet used in the report).
        timestamp: string suffix embedded in the output filename.

    Returns:
        The generated report filename.
    """
    # Local import: the module header never imports datetime, so the
    # original code raised NameError the first time this function ran.
    from datetime import datetime

    print("📝 生成可解释性报告...")

    # One timestamp for both header and footer so they always agree
    # (two separate .now() calls could straddle a second boundary).
    generated_at = datetime.now().strftime('%Y年%m月%d日 %H:%M:%S')

    # Basic framework only; detailed sections can be appended later.
    report = f"""
# 任务四：迁移诊断可解释性分析报告

## 报告生成时间
{generated_at}

## 分析结果摘要
- 特征重要性分析完成
- 域适应过程分析完成
- 故障机理分析完成
- 决策边界分析完成

## 详细分析结果
[此处可以添加详细的分析结果]

---
*报告生成时间: {generated_at}*
"""

    report_filename = f'task4_interpretability_report_{timestamp}.md'
    with open(report_filename, 'w', encoding='utf-8') as f:
        f.write(report)

    print(f"✅ 可解释性报告已保存: {report_filename}")
    return report_filename
