"""
高速列车轴承智能故障诊断 - 域适应迁移学习模块

本模块包含：
1. 域适应模型构建（DANN、Deep CORAL等）
2. 迁移学习训练策略
3. 目标域诊断和可视化

作者：数学建模团队
版本：1.0
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score, f1_score, classification_report, confusion_matrix
from scipy.spatial.distance import jensenshannon, cosine
import tensorflow as tf
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import (Dense, Conv1D, MaxPooling1D, BatchNormalization, 
                                   Activation, Input, Dropout, GlobalAveragePooling1D,
                                   MultiHeadAttention, LayerNormalization)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
import warnings
warnings.filterwarnings('ignore')

# 设置中文字体
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False


class DomainAdaptationModels:
    """Builds domain-adaptation architectures (DANN, Deep CORAL, adversarial DA)."""

    def __init__(self, input_shape, num_classes):
        """
        Initialize the model builder.

        Args:
            input_shape (tuple): shape of a single input sample, e.g. (seq_len, channels)
            num_classes (int): number of fault classes
        """
        self.input_shape = input_shape
        self.num_classes = num_classes

    # ------------------------------------------------------------------
    # Shared sub-networks (previously duplicated in two builder methods)
    # ------------------------------------------------------------------
    def _build_feature_extractor(self):
        """1-D CNN backbone: three conv/BN/pool blocks -> 256-d pooled feature vector."""
        inputs = Input(shape=self.input_shape)
        x = Conv1D(64, 3, activation='relu')(inputs)
        x = BatchNormalization()(x)
        x = MaxPooling1D(2)(x)
        x = Conv1D(128, 3, activation='relu')(x)
        x = BatchNormalization()(x)
        x = MaxPooling1D(2)(x)
        x = Conv1D(256, 3, activation='relu')(x)
        x = BatchNormalization()(x)
        x = GlobalAveragePooling1D()(x)
        return Model(inputs, x, name='feature_extractor')

    def _build_label_classifier(self):
        """Dense head mapping the 256-d feature to class probabilities (softmax)."""
        inputs = Input(shape=(256,))
        x = Dense(128, activation='relu')(inputs)
        x = Dropout(0.5)(x)
        x = Dense(64, activation='relu')(x)
        outputs = Dense(self.num_classes, activation='softmax', name='label_output')(x)
        return Model(inputs, outputs, name='label_classifier')

    def _build_domain_classifier(self):
        """Dense head predicting the domain of a feature (source=0 / target=1, sigmoid)."""
        inputs = Input(shape=(256,))
        x = Dense(128, activation='relu')(inputs)
        x = Dropout(0.5)(x)
        x = Dense(64, activation='relu')(x)
        outputs = Dense(1, activation='sigmoid', name='domain_output')(x)
        return Model(inputs, outputs, name='domain_classifier')

    def build_dann_model(self):
        """Build a Domain-Adversarial Neural Network (DANN).

        Returns:
            Model: two-input model producing [source_label, target_label,
            domain_pred]. Note the domain output covers the concatenated
            source+target batch (size 2N), which plain ``model.fit`` cannot
            supervise directly; an exact DANN needs a custom training loop.
        """
        feat_extractor = self._build_feature_extractor()
        label_clf = self._build_label_classifier()
        domain_clf = self._build_domain_classifier()

        source_input = Input(shape=self.input_shape, name='source_input')
        target_input = Input(shape=self.input_shape, name='target_input')

        # Source path: shared extractor + label head
        source_features = feat_extractor(source_input)
        source_label = label_clf(source_features)

        # Target path: same shared extractor + label head
        target_features = feat_extractor(target_input)
        target_label = label_clf(target_features)

        # Domain classification over both domains at once
        combined_features = tf.concat([source_features, target_features], axis=0)
        domain_pred = domain_clf(combined_features)

        return Model(
            inputs=[source_input, target_input],
            outputs=[source_label, target_label, domain_pred],
            name='dann_model'
        )

    def build_deep_coral_model(self):
        """Build a Deep CORAL model.

        The second output is the CORAL loss value (distance between the
        centered feature covariance matrices), exposed through a Lambda
        layer named 'coral_loss' so compile() can address it by that key.
        """
        def coral_loss(source_features, target_features):
            # CORAL (Sun & Saenko, 2016): squared Frobenius distance between
            # the *centered* covariance matrices, scaled by 1/(4 d^2).
            # The previous version used uncentered second moments, which
            # mixes mean shift into the covariance term.
            d = tf.cast(tf.shape(source_features)[1], tf.float32)

            def _cov(f):
                n = tf.cast(tf.shape(f)[0], tf.float32)
                centered = f - tf.reduce_mean(f, axis=0, keepdims=True)
                # max() guards against a batch of size 1
                return tf.matmul(centered, centered, transpose_a=True) / tf.maximum(n - 1.0, 1.0)

            diff = _cov(source_features) - _cov(target_features)
            return tf.reduce_sum(tf.square(diff)) / (4.0 * d * d)

        base_model = Sequential([
            Conv1D(64, 3, activation='relu', input_shape=self.input_shape),
            BatchNormalization(),
            MaxPooling1D(2),
            Conv1D(128, 3, activation='relu'),
            BatchNormalization(),
            MaxPooling1D(2),
            Conv1D(256, 3, activation='relu'),
            BatchNormalization(),
            GlobalAveragePooling1D(),
            Dense(128, activation='relu'),
            Dropout(0.5)
        ], name='feature_extractor')

        classifier = Dense(self.num_classes, activation='softmax', name='classifier')

        source_input = Input(shape=self.input_shape, name='source_input')
        target_input = Input(shape=self.input_shape, name='target_input')

        source_features = base_model(source_input)
        target_features = base_model(target_input)

        source_output = classifier(source_features)

        # Wrap the loss tensor in a named layer so the trainer's compile()
        # key 'coral_loss' can bind to this output.
        coral_loss_value = tf.keras.layers.Lambda(
            lambda feats: coral_loss(feats[0], feats[1]), name='coral_loss'
        )([source_features, target_features])

        return Model(
            inputs=[source_input, target_input],
            outputs=[source_output, coral_loss_value],
            name='deep_coral_model'
        )

    def build_adversarial_domain_adaptation(self):
        """Build an adversarial DA model with a true gradient-reversal layer."""

        @tf.custom_gradient
        def _grad_reverse(x):
            # Identity in the forward pass, negated gradient in the backward
            # pass (Ganin & Lempitsky, 2015). The original implementation
            # used tf.stop_gradient, which *blocks* gradients instead of
            # reversing them, so the feature extractor received no
            # adversarial signal at all.
            def grad(dy):
                return -dy
            return tf.identity(x), grad

        class GradientReversal(tf.keras.layers.Layer):
            """Keras wrapper around the gradient-reversal op."""

            def __init__(self, **kwargs):
                super(GradientReversal, self).__init__(**kwargs)

            def call(self, x, training=None):
                return _grad_reverse(x)

            def get_config(self):
                config = super().get_config()
                return config

        feat_extractor = self._build_feature_extractor()
        label_clf = self._build_label_classifier()
        domain_clf = self._build_domain_classifier()

        source_input = Input(shape=self.input_shape, name='source_input')
        target_input = Input(shape=self.input_shape, name='target_input')

        source_features = feat_extractor(source_input)
        target_features = feat_extractor(target_input)

        source_label = label_clf(source_features)
        target_label = label_clf(target_features)

        # Domain prediction goes through gradient reversal so the feature
        # extractor is pushed toward domain-invariant representations.
        combined_features = tf.concat([source_features, target_features], axis=0)
        reversed_features = GradientReversal()(combined_features)
        domain_pred = domain_clf(reversed_features)

        return Model(
            inputs=[source_input, target_input],
            outputs=[source_label, target_label, domain_pred],
            name='adversarial_da_model'
        )


class TransferLearningTrainer:
    """Compiles and trains domain-adaptation models and plots training curves."""

    def __init__(self, model, model_type='dann'):
        """
        Initialize the trainer.

        Args:
            model: the transfer-learning Keras model to train
            model_type (str): one of 'dann', 'deep_coral', 'adversarial'
        """
        self.model = model
        self.model_type = model_type
        self.history = None  # keras History, set by the train_* methods

    def compile_model(self, alpha=1.0):
        """
        Compile the model with losses matching its architecture.

        Args:
            alpha (float): weight of the domain-adaptation loss term

        Raises:
            ValueError: if the model type is not recognized (previously a
            silent no-op that left the model uncompiled).
        """
        # NOTE(review): the loss-dict keys assume the model's output tensors
        # are named 'label_output'/'domain_output' ('classifier'/'coral_loss'
        # for CORAL). With nested sub-models Keras may derive different
        # output names -- verify with model.output_names before training.
        if self.model_type in ('dann', 'adversarial'):
            # Both architectures expose the same output heads and losses.
            self.model.compile(
                optimizer=Adam(0.001),
                loss={
                    'label_output': 'categorical_crossentropy',
                    'domain_output': 'binary_crossentropy'
                },
                loss_weights={
                    'label_output': 1.0,
                    'domain_output': alpha
                },
                metrics={'label_output': ['accuracy']}
            )
        elif self.model_type == 'deep_coral':
            self.model.compile(
                optimizer=Adam(0.001),
                loss={
                    'classifier': 'categorical_crossentropy',
                    # The model already emits the CORAL loss value, so the
                    # "loss" for that output is simply the prediction itself.
                    'coral_loss': lambda y_true, y_pred: y_pred
                },
                loss_weights={
                    'classifier': 1.0,
                    'coral_loss': 0.5
                },
                metrics={'classifier': ['accuracy']}
            )
        else:
            raise ValueError(f"unknown model_type: {self.model_type!r}")

    def train_dann(self, source_data, target_data, epochs=100, batch_size=32):
        """
        Train a DANN-style model.

        Args:
            source_data (tuple): labeled source-domain data (X, y)
            target_data (tuple): target-domain data (X, _); labels are ignored
            epochs (int): number of training epochs
            batch_size (int): batch size

        NOTE(review): Keras ``fit`` requires all input/output arrays to share
        the same first dimension, but here the domain-label array has
        len(source)+len(target) rows while the model's domain output covers
        the concatenated batch (2N). Exact DANN training needs a custom loop
        (e.g. ``train_on_batch`` with balanced source/target batches) --
        confirm before relying on this method.
        """
        X_source, y_source = source_data
        X_target, _ = target_data  # target domain is unlabeled

        # Domain labels: source=0, target=1
        source_domain_labels = np.zeros((len(X_source), 1))
        target_domain_labels = np.ones((len(X_target), 1))

        domain_labels = np.concatenate([source_domain_labels, target_domain_labels], axis=0)

        callbacks = [
            EarlyStopping(patience=15, restore_best_weights=True),
            ReduceLROnPlateau(factor=0.5, patience=5)
        ]

        self.history = self.model.fit(
            [X_source, X_target],
            # Target labels are unknown, so source labels are reused as
            # pseudo-labels for the target head.
            [y_source, y_source, domain_labels],
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks,
            verbose=1
        )

    def train_deep_coral(self, source_data, target_data, epochs=100, batch_size=32):
        """
        Train a Deep CORAL model.

        Args:
            source_data (tuple): labeled source-domain data (X, y)
            target_data (tuple): target-domain data (X, _); labels are ignored
            epochs (int): number of training epochs
            batch_size (int): batch size
        """
        X_source, y_source = source_data
        X_target, _ = target_data

        # Dummy targets for the CORAL-loss output (the loss ignores y_true)
        coral_dummy = np.zeros((len(X_source), 1))

        self.history = self.model.fit(
            [X_source, X_target],
            [y_source, coral_dummy],
            epochs=epochs,
            batch_size=batch_size,
            verbose=1
        )

    def plot_training_history(self):
        """Plot the training curves, skipping any metric absent from history.

        Fix: the previous version unconditionally indexed 'val_*' keys, which
        raised KeyError because the train_* methods pass no validation data.
        """
        if self.history is None:
            print("模型尚未训练，无法绘制训练历史")
            return

        hist = self.history.history
        fig, axes = plt.subplots(2, 2, figsize=(15, 10))

        # (axis, panel title, [(history key, curve label), ...])
        panels = [
            (axes[0, 0], '标签分类损失',
             [('label_output_loss', '训练损失'), ('val_label_output_loss', '验证损失')]),
            (axes[0, 1], '标签分类准确率',
             [('label_output_accuracy', '训练准确率'), ('val_label_output_accuracy', '验证准确率')]),
            (axes[1, 0], '域分类损失',
             [('domain_output_loss', '训练损失'), ('val_domain_output_loss', '验证损失')]),
            (axes[1, 1], '总损失',
             [('loss', '训练总损失'), ('val_loss', '验证总损失')]),
        ]
        for ax, title, series in panels:
            for key, label in series:
                if key in hist:  # val_* keys only exist when validation data was used
                    ax.plot(hist[key], label=label)
            ax.set_title(title)
            ax.legend()

        plt.tight_layout()
        plt.show()


class TargetDomainDiagnosis:
    """Runs inference on target-domain data and visualizes / reports the results."""

    def __init__(self, model, feature_extractor):
        """
        Initialize the diagnoser.

        Args:
            model: trained Keras model used for prediction
            feature_extractor: optional feature-extractor sub-model (may be None;
                not used by the methods below)
        """
        self.model = model
        self.feature_extractor = feature_extractor

    def diagnose_target_domain(self, target_data):
        """
        Predict fault classes for target-domain samples.

        Args:
            target_data (np.array): target-domain samples

        Returns:
            np.array: raw model predictions
        """
        return self.model.predict(target_data)

    def visualize_diagnosis_results(self, predictions, true_labels=None, class_names=None):
        """
        Visualize diagnosis results: probability heatmap, confidence histogram,
        and (when labels are available) confusion matrix and classification report.

        Args:
            predictions (np.array): class-probability matrix (n_samples, n_classes)
            true_labels (np.array): optional ground-truth labels
            class_names (list): optional class names for axis ticks
        """
        plt.figure(figsize=(15, 10))

        # Per-sample class-probability heatmap (classes on the y-axis)
        plt.subplot(2, 2, 1)
        plt.imshow(predictions.T, aspect='auto', cmap='viridis')
        plt.colorbar()
        plt.title('预测概率分布')
        plt.xlabel('样本索引')
        plt.ylabel('类别')

        # Histogram of the winning-class probability per sample
        plt.subplot(2, 2, 2)
        confidence = np.max(predictions, axis=1)
        plt.hist(confidence, bins=30, alpha=0.7, color='skyblue')
        plt.title('预测置信度分布')
        plt.xlabel('置信度')
        plt.ylabel('频次')

        if true_labels is not None:
            # Confusion matrix
            plt.subplot(2, 2, 3)
            pred_labels = np.argmax(predictions, axis=1)
            cm = confusion_matrix(true_labels, pred_labels)
            sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                       xticklabels=class_names, yticklabels=class_names)
            plt.title('混淆矩阵')
            plt.xlabel('预测标签')
            plt.ylabel('真实标签')

            # Classification report rendered as a table
            plt.subplot(2, 2, 4)
            report = classification_report(true_labels, pred_labels,
                                         target_names=class_names, output_dict=True)
            report_df = pd.DataFrame(report).transpose()
            plt.table(cellText=report_df.values,
                     rowLabels=report_df.index,
                     colLabels=report_df.columns,
                     cellLoc='center',
                     loc='center')
            plt.axis('off')
            plt.title('分类报告')

        plt.tight_layout()
        plt.show()

    def generate_diagnosis_report(self, predictions, file_names):
        """
        Build a per-file diagnosis report.

        Args:
            predictions (np.array): class-probability matrix (n_samples, n_classes)
            file_names (list): one file name per sample

        Returns:
            pd.DataFrame: one row per file with predicted label, confidence,
            and the full probability vector
        """
        # Comprehension over zip; the previous enumerate index was unused.
        report_data = [
            {
                'file_name': file_name,
                'predicted_label': int(np.argmax(pred)),
                'confidence': float(np.max(pred)),
                'probabilities': pred.tolist()
            }
            for file_name, pred in zip(file_names, predictions)
        ]

        return pd.DataFrame(report_data)


class TransferProcessExplanation:
    """Explains the transfer process: domain alignment, feature-distribution shift, metrics."""

    def __init__(self, source_features, target_features, source_labels, target_predictions):
        """
        Initialize the explainer.

        Args:
            source_features (np.array): (n_source, d) source-domain features
            target_features (np.array): (n_target, d) target-domain features
            source_labels (np.array): source-domain class labels
            target_predictions (np.array): (n_target, n_classes) target probabilities
        """
        self.source_features = source_features
        self.target_features = target_features
        self.source_labels = source_labels
        self.target_predictions = target_predictions

    def visualize_domain_alignment(self):
        """Project source+target features with t-SNE and plot domain / class layouts."""
        from sklearn.manifold import TSNE

        # Embed both domains jointly so they share one 2-D space
        combined_features = np.vstack([self.source_features, self.target_features])
        tsne = TSNE(n_components=2, random_state=42)
        embedded = tsne.fit_transform(combined_features)

        # Domain indicator: source=0, target=1
        source_domain = np.zeros(len(self.source_features))
        target_domain = np.ones(len(self.target_features))
        domain_labels = np.concatenate([source_domain, target_domain])

        plt.figure(figsize=(15, 6))

        # Left panel: colored by domain
        plt.subplot(1, 2, 1)
        scatter = plt.scatter(embedded[:, 0], embedded[:, 1], c=domain_labels,
                            cmap='coolwarm', alpha=0.7)
        plt.colorbar(scatter, ticks=[0, 1])
        plt.title('域分布 (源域=0, 目标域=1)')

        # Right panel: colored by class (target classes are pseudo-labels
        # taken from the model's predictions)
        plt.subplot(1, 2, 2)
        combined_labels = np.concatenate([
            self.source_labels,
            np.argmax(self.target_predictions, axis=1)
        ])
        scatter = plt.scatter(embedded[:, 0], embedded[:, 1], c=combined_labels,
                            cmap='viridis', alpha=0.7)
        plt.colorbar(scatter)
        plt.title('类别分布')

        plt.tight_layout()
        plt.show()

    def analyze_feature_distribution(self):
        """
        Compute and plot the per-feature Jensen-Shannon distance between domains.

        Returns:
            list: JS distance per feature dimension.
        """
        distribution_differences = []
        for i in range(self.source_features.shape[1]):
            col_source = self.source_features[:, i]
            col_target = self.target_features[:, i]
            # Fix: use bin edges shared by both domains. Previously each
            # histogram used its own min/max range, so the bins were not
            # comparable and the JS distance was meaningless.
            edges = np.histogram_bin_edges(
                np.concatenate([col_source, col_target]), bins=50)
            source_dist = np.histogram(col_source, bins=edges, density=True)[0]
            target_dist = np.histogram(col_target, bins=edges, density=True)[0]
            # jensenshannon normalizes its inputs to probability vectors
            distribution_differences.append(jensenshannon(source_dist, target_dist))

        plt.figure(figsize=(12, 6))
        plt.bar(range(len(distribution_differences)), distribution_differences,
               color='lightcoral', alpha=0.7)
        plt.title('特征分布差异 (Jensen-Shannon距离)')
        plt.xlabel('特征索引')
        plt.ylabel('JS距离')
        plt.show()

        return distribution_differences

    def calculate_transfer_metrics(self):
        """
        Compute summary transfer metrics.

        Returns:
            dict: source-accuracy proxy, mean target confidence,
            domain-alignment score, and mean-feature cosine similarity.
        """
        # NOTE(review): 'source_accuracy' compares labels against the argmax
        # of raw feature vectors, which is not a classifier output -- the
        # value is a placeholder, not a real accuracy. Kept for interface
        # compatibility; verify intent with the authors.
        metrics = {
            'source_accuracy': accuracy_score(self.source_labels,
                                            np.argmax(self.source_features, axis=1)
                                            if self.source_features.ndim > 1 else self.source_labels),
            'target_confidence': np.mean(np.max(self.target_predictions, axis=1)),
            'domain_alignment': self._calculate_domain_alignment(),
            'feature_similarity': self._calculate_feature_similarity()
        }

        return metrics

    def _calculate_domain_alignment(self):
        """Train a linear domain classifier; return 1 - CV accuracy (higher = better aligned)."""
        # Binary domain-classification task: source=0, target=1
        X_domain = np.vstack([self.source_features, self.target_features])
        y_domain = np.concatenate([np.zeros(len(self.source_features)),
                                 np.ones(len(self.target_features))])

        clf = LinearSVC()
        scores = cross_val_score(clf, X_domain, y_domain, cv=5)

        # The harder the domains are to tell apart, the better the alignment
        return 1 - np.mean(scores)

    def _calculate_feature_similarity(self):
        """Cosine similarity between the two domains' mean feature vectors."""
        source_mean = np.mean(self.source_features, axis=0)
        target_mean = np.mean(self.target_features, axis=0)

        # scipy's cosine() is a distance; 1 - distance = similarity
        return 1 - cosine(source_mean, target_mean)


class DomainAdaptationPipeline:
    """End-to-end pipeline: build, train, and evaluate a domain-adaptation model."""

    def __init__(self, input_shape, num_classes, model_type='dann'):
        """
        Initialize the pipeline.

        Args:
            input_shape (tuple): input sample shape
            num_classes (int): number of classes
            model_type (str): 'dann', 'deep_coral' or 'adversarial'
        """
        self.input_shape = input_shape
        self.num_classes = num_classes
        self.model_type = model_type
        self.model = None
        self.trainer = None

    def build_model(self):
        """Build and compile the model for the configured type.

        Raises:
            ValueError: for an unrecognized model type (previously a silent
            no-op that crashed later with ``self.model = None``).
        """
        model_builder = DomainAdaptationModels(self.input_shape, self.num_classes)

        if self.model_type == 'dann':
            self.model = model_builder.build_dann_model()
        elif self.model_type == 'deep_coral':
            self.model = model_builder.build_deep_coral_model()
        elif self.model_type == 'adversarial':
            self.model = model_builder.build_adversarial_domain_adaptation()
        else:
            raise ValueError(f"unknown model_type: {self.model_type!r}")

        self.trainer = TransferLearningTrainer(self.model, self.model_type)
        self.trainer.compile_model()

    def train(self, source_data, target_data, epochs=100, batch_size=32):
        """
        Train the domain-adaptation model.

        Args:
            source_data (tuple): labeled source-domain data (X, y)
            target_data (tuple): target-domain data (X, _)
            epochs (int): number of training epochs
            batch_size (int): batch size
        """
        if self.model_type in ('dann', 'adversarial'):
            # Fix: the adversarial model shares the DANN output structure,
            # so it uses the same training routine (previously it could be
            # built but never trained).
            self.trainer.train_dann(source_data, target_data, epochs, batch_size)
        elif self.model_type == 'deep_coral':
            self.trainer.train_deep_coral(source_data, target_data, epochs, batch_size)

    def evaluate(self, target_data, true_labels=None, class_names=None):
        """
        Evaluate the model on target-domain data.

        Args:
            target_data (np.array): target-domain samples
            true_labels (np.array): optional ground-truth labels
            class_names (list): optional class names for plots

        Returns:
            dict: raw model 'predictions' plus a per-sample 'report' DataFrame
        """
        # Fix: every model type here is a two-input network, so predicting
        # with a single array raised a shape error. Feed the target data
        # into both inputs to run inference.
        raw_predictions = self.model.predict([target_data, target_data])

        # Multi-output models return [source_label, target_label, ...];
        # index 1 holds the target-branch class probabilities.
        if isinstance(raw_predictions, list):
            class_probs = raw_predictions[1]
        else:
            class_probs = raw_predictions

        diagnosis = TargetDomainDiagnosis(self.model, None)

        # Visualize using the (n_samples, n_classes) probability matrix
        diagnosis.visualize_diagnosis_results(class_probs, true_labels, class_names)

        # Per-sample report with synthetic file names
        file_names = [f'target_{i}' for i in range(len(target_data))]
        report = diagnosis.generate_diagnosis_report(class_probs, file_names)

        # Return the raw output so callers can still index individual heads
        return {
            'predictions': raw_predictions,
            'report': report
        }


def main():
    """Demo entry point: build, train, evaluate and explain a DANN pipeline on synthetic data."""
    # Fixed seed so the synthetic demo is reproducible
    np.random.seed(42)
    
    # Source domain: 500 labeled samples of shape (30, 1), 4 classes
    X_source = np.random.randn(500, 30, 1)
    y_source = np.random.randint(0, 4, 500)
    y_source_cat = tf.keras.utils.to_categorical(y_source)
    
    # Target domain: 200 samples with a constant +0.5 offset to simulate domain shift
    X_target = np.random.randn(200, 30, 1) + 0.5  # add domain shift
    y_target = np.random.randint(0, 4, 200)
    y_target_cat = tf.keras.utils.to_categorical(y_target)  # NOTE: unused below; kept for reference
    
    # Model parameters (class names: normal / outer-race / inner-race / rolling-element fault)
    input_shape = (30, 1)
    num_classes = 4
    class_names = ['正常', '外圈故障', '内圈故障', '滚动体故障']
    
    # Create the domain-adaptation pipeline (DANN variant)
    pipeline = DomainAdaptationPipeline(input_shape, num_classes, 'dann')
    pipeline.build_model()
    
    # Train: target labels are withheld (unsupervised adaptation)
    print("训练域适应模型...")
    pipeline.train((X_source, y_source_cat), (X_target, None), epochs=20)
    
    # Evaluate on the target domain against the held-out true labels
    print("评估模型性能...")
    results = pipeline.evaluate(X_target, y_target, class_names)
    
    # Transfer-process analysis on the flattened raw signals as features
    print("分析迁移过程...")
    transfer_analyzer = TransferProcessExplanation(
        source_features=X_source.reshape(X_source.shape[0], -1),
        target_features=X_target.reshape(X_target.shape[0], -1),
        source_labels=y_source,
        target_predictions=results['predictions'][1]  # target-branch output (index 1 of the multi-output list)
    )
    
    # Visualize domain alignment via t-SNE
    transfer_analyzer.visualize_domain_alignment()
    
    # Per-feature distribution shift (Jensen-Shannon distance)
    distribution_differences = transfer_analyzer.analyze_feature_distribution()
    
    # Aggregate transfer metrics
    transfer_metrics = transfer_analyzer.calculate_transfer_metrics()
    print("迁移指标:", transfer_metrics)


if __name__ == "__main__":
    main()
