"""
高速列车轴承智能故障诊断 - 完整流水线和部署模块

本模块包含：
1. 完整诊断流水线集成
2. 模型部署和监控系统
3. 性能评估和报告生成

作者：数学建模团队
版本：1.0
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import json
import time
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Tuple, Optional, Any
import warnings
warnings.filterwarnings('ignore')

# 导入其他模块
from data_preprocessing_and_feature_engineering import DataProcessingPipeline
from deep_learning_models import DeepLearningModels, ModelTrainer, ModelEnsemble
from domain_adaptive_transfer_learning import DomainAdaptationPipeline
from interpretability_analysis import InterpretabilityPipeline, ComprehensiveExplanationReport

# Configure matplotlib to render Chinese labels (SimHei), with a Latin fallback
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering correctly with CJK fonts

# Module-level logging: timestamped INFO-and-above to the root handler
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class CompleteDiagnosisPipeline:
    """End-to-end bearing fault diagnosis pipeline.

    Orchestrates the project's companion modules: data preprocessing,
    feature extraction, source-domain model training, optional
    domain-adaptive transfer learning, target-domain diagnosis, and
    explainability analysis.
    """
    
    def __init__(self, config: Dict[str, Any]):
        """
        Initialize the complete diagnosis pipeline.
        
        Args:
            config (dict): Configuration. Keys read by this class:
                'data_path', 'input_shape', 'num_classes', 'feature_names',
                'class_names'; optional 'target_fs' (default 12000) and
                'model_type' (default 'hybrid').
        """
        self.config = config
        self.preprocessor = None       # DataProcessingPipeline, set by build_pipeline()
        self.feature_extractor = None  # FeatureExtractor, set by build_pipeline()
        self.model = None              # Keras model, set by build_pipeline()
        self.xai_module = None         # InterpretabilityPipeline, set by build_pipeline()
        self.results = {}              # last run_pipeline() output
        
        # Per-instance logger namespaced under this module
        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
        
    def build_pipeline(self):
        """Construct all pipeline components from self.config.

        Raises:
            Exception: re-raised after logging when any component fails.
        """
        try:
            self.logger.info("开始构建诊断流水线...")
            
            # Data loading / preprocessing pipeline
            self.preprocessor = DataProcessingPipeline(
                self.config['data_path'], 
                self.config.get('target_fs', 12000)
            )
            
            # Feature extractor (imported lazily here -- presumably to avoid a
            # module-level import cycle; TODO confirm)
            from data_preprocessing_and_feature_engineering import FeatureExtractor
            self.feature_extractor = FeatureExtractor(self.config.get('target_fs', 12000))
            
            # Deep-learning model selected by config['model_type']
            self.model = self._build_model()
            
            # Explainability module bound to the freshly built model
            self.xai_module = InterpretabilityPipeline(
                self.model,
                self.config['feature_names'],
                self.config['class_names']
            )
            
            self.logger.info("诊断流水线构建完成")
            
        except Exception as e:
            self.logger.error(f"构建流水线时出错: {str(e)}")
            raise
    
    def _build_model(self):
        """Build and return the Keras model named by config['model_type'].

        Raises:
            ValueError: if the configured model type is not recognized.
        """
        model_builder = DeepLearningModels(
            self.config['input_shape'], 
            self.config['num_classes']
        )
        
        model_type = self.config.get('model_type', 'hybrid')
        
        # Dispatch on the configured architecture name
        if model_type == 'cnn':
            return model_builder.build_cnn_model()
        elif model_type == 'resnet':
            return model_builder.build_resnet_model()
        elif model_type == 'transformer':
            return model_builder.build_transformer_model()
        elif model_type == 'hybrid':
            return model_builder.build_hybrid_model()
        elif model_type == 'lstm':
            return model_builder.build_lstm_model()
        elif model_type == 'bidirectional_lstm':
            return model_builder.build_bidirectional_lstm_model()
        else:
            raise ValueError(f"不支持的模型类型: {model_type}")
    
    def run_pipeline(self, source_files: List[str], target_files: List[str]) -> Dict[str, Any]:
        """
        Run the full pipeline end to end.
        
        Args:
            source_files (list): Source-domain .mat file paths.
            target_files (list): Target-domain .mat file paths.
            
        Returns:
            dict: Diagnosis results, explainability results, dataset info
            and the source-model trainer.
        
        NOTE(review): only step 4 is guarded by `len(target_files) > 0`;
        steps 5-6 always run on the target domain, so an empty target list
        will fail downstream -- confirm callers always supply target data.
        """
        try:
            self.logger.info("开始运行完整诊断流水线...")
            results = {}
            
            # 1. Load and preprocess both domains
            self.logger.info("步骤1: 加载和预处理数据...")
            source_data, source_info = self.preprocessor.load_mat_files(source_files)
            target_data, target_info = self.preprocessor.load_mat_files(target_files)
            
            # 2. Feature extraction
            self.logger.info("步骤2: 提取特征...")
            source_features = self._extract_features_batch(source_data)
            target_features = self._extract_features_batch(target_data)
            
            # 3. Train the source-domain model
            self.logger.info("步骤3: 训练源域模型...")
            model_trainer = self._train_source_model(source_features)
            
            # 4. Transfer learning (only when target-domain files were given)
            if len(target_files) > 0:
                self.logger.info("步骤4: 执行迁移学习...")
                transfer_results = self._perform_transfer_learning(source_features, target_features)
                results['transfer_results'] = transfer_results
            
            # 5. Target-domain diagnosis
            self.logger.info("步骤5: 诊断目标域...")
            diagnosis_results = self._diagnose_target_domain(target_features)
            
            # 6. Explainability analysis
            self.logger.info("步骤6: 执行可解释性分析...")
            explainability_results = self._perform_explainability_analysis(
                source_features, target_features, diagnosis_results
            )
            
            results.update({
                'diagnosis_results': diagnosis_results,
                'explainability_results': explainability_results,
                'source_info': source_info,
                'target_info': target_info,
                'model_trainer': model_trainer
            })
            
            self.results = results
            self.logger.info("完整诊断流水线运行完成")
            
            return results
            
        except Exception as e:
            self.logger.error(f"运行流水线时出错: {str(e)}")
            raise
    
    def _extract_features_batch(self, data_list: List[Dict]) -> pd.DataFrame:
        """Extract features for each record that has a drive-end signal.

        Records whose 'de_signal' is None are silently skipped.
        """
        features_list = []
        for data in data_list:
            if data['de_signal'] is not None:
                features = self.feature_extractor.extract_all_features(
                    data['de_signal'], data['rpm']
                )
                # Carry metadata columns alongside the numeric features
                features['file_name'] = data['file_name']
                features['fault_type'] = data['fault_type']
                features['fault_size'] = data['fault_size']
                features['load_condition'] = data['load_condition']
                features_list.append(features)
        return pd.DataFrame(features_list)
    
    def _train_source_model(self, features: pd.DataFrame) -> ModelTrainer:
        """Train the source-domain model on the extracted feature table."""
        from sklearn.preprocessing import LabelEncoder
        from sklearn.model_selection import train_test_split
        from tensorflow.keras.utils import to_categorical
        
        # Drop metadata columns; one-hot encode the fault-type labels
        X = features.drop(['file_name', 'fault_type', 'fault_size', 'load_condition'], axis=1)
        y = LabelEncoder().fit_transform(features['fault_type'])
        y_cat = to_categorical(y)
        
        # Stratified 80/20 split with a fixed seed for reproducibility
        X_train, X_test, y_train, y_test = train_test_split(
            X.values, y_cat, test_size=0.2, random_state=42, stratify=y
        )
        
        # Add a channel axis: (samples, features) -> (samples, features, 1)
        X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
        X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
        
        # Trains self.model (the trainer wraps the same model object)
        trainer = ModelTrainer(self.model, 'source_model')
        trainer.compile_model()
        trainer.train_model(X_train, y_train, X_test, y_test, epochs=50)
        
        return trainer
    
    def _perform_transfer_learning(self, source_features: pd.DataFrame, 
                                 target_features: pd.DataFrame) -> Dict[str, Any]:
        """Run DANN-style domain adaptation from source to target domain."""
        from sklearn.preprocessing import LabelEncoder
        from tensorflow.keras.utils import to_categorical
        
        # Labeled source-domain data
        X_source = source_features.drop(['file_name', 'fault_type', 'fault_size', 'load_condition'], axis=1)
        y_source = LabelEncoder().fit_transform(source_features['fault_type'])
        y_source_cat = to_categorical(y_source)
        
        # Target-domain data (used without labels below)
        X_target = target_features.drop(['file_name', 'fault_type', 'fault_size', 'load_condition'], axis=1)
        
        # Add channel axis for the 1-D models
        X_source = X_source.values.reshape(X_source.shape[0], X_source.shape[1], 1)
        X_target = X_target.values.reshape(X_target.shape[0], X_target.shape[1], 1)
        
        # Build the domain-adaptation model ('dann' variant)
        da_pipeline = DomainAdaptationPipeline(
            self.config['input_shape'], 
            self.config['num_classes'], 
            'dann'
        )
        da_pipeline.build_model()
        
        # Target labels are None: adaptation is unsupervised on the target side
        da_pipeline.train((X_source, y_source_cat), (X_target, None), epochs=30)
        
        # Evaluate adapted model on the target domain
        evaluation_results = da_pipeline.evaluate(X_target, None, self.config['class_names'])
        
        return {
            'da_pipeline': da_pipeline,
            'evaluation_results': evaluation_results
        }
    
    def _diagnose_target_domain(self, target_features: pd.DataFrame) -> Dict[str, Any]:
        """Predict target-domain fault classes with the trained model.

        NOTE(review): a fresh LabelEncoder is fit on the target labels here,
        independently of the source-domain encoding in _train_source_model.
        The reported accuracy is only meaningful if both domains contain the
        same fault classes (same lexicographic encoding) -- confirm.
        """
        from sklearn.preprocessing import LabelEncoder
        from tensorflow.keras.utils import to_categorical
        
        # Drop metadata columns; encode target labels for scoring
        X_target = target_features.drop(['file_name', 'fault_type', 'fault_size', 'load_condition'], axis=1)
        y_target = LabelEncoder().fit_transform(target_features['fault_type'])
        y_target_cat = to_categorical(y_target)
        
        # Add channel axis to match the model's input shape
        X_target = X_target.values.reshape(X_target.shape[0], X_target.shape[1], 1)
        
        # Predict with the source-trained model
        predictions = self.model.predict(X_target)
        predicted_classes = np.argmax(predictions, axis=1)
        true_classes = np.argmax(y_target_cat, axis=1)
        
        # Score predictions against the (independently encoded) target labels
        from sklearn.metrics import accuracy_score, classification_report
        accuracy = accuracy_score(true_classes, predicted_classes)
        
        return {
            'predictions': predictions,
            'predicted_classes': predicted_classes,
            'true_classes': true_classes,
            'accuracy': accuracy,
            'classification_report': classification_report(true_classes, predicted_classes)
        }
    
    def _perform_explainability_analysis(self, source_features: pd.DataFrame, 
                                       target_features: pd.DataFrame, 
                                       diagnosis_results: Dict[str, Any]) -> Dict[str, Any]:
        """Run XAI analysis and build a comprehensive explanation report.

        Only the first few target samples are explained (5 for the XAI run,
        3 for the report) to bound analysis cost.
        """
        # Target-domain tensor with channel axis, as used by the model
        X_target = target_features.drop(['file_name', 'fault_type', 'fault_size', 'load_condition'], axis=1)
        X_target = X_target.values.reshape(X_target.shape[0], X_target.shape[1], 1)
        
        # Comprehensive XAI analysis over up to 5 samples
        xai_results = self.xai_module.run_comprehensive_analysis(
            X_target, 
            diagnosis_results['true_classes'],
            sample_indices=range(min(5, len(X_target)))
        )
        
        # Standalone explanation report over up to 3 samples
        report_generator = ComprehensiveExplanationReport(
            self.model,
            X_target,
            self.config['feature_names'],
            self.config['class_names']
        )
        
        comprehensive_report = report_generator.generate_comprehensive_report(
            sample_indices=range(min(3, len(X_target)))
        )
        
        return {
            'xai_results': xai_results,
            'comprehensive_report': comprehensive_report
        }


class DeploymentSystem:
    """Deployment and runtime monitoring for a trained diagnosis model.

    Wraps a trained model (loaded from disk or taken from the diagnosis
    pipeline), records prediction metrics over time, flags performance
    degradation between consecutive monitoring rounds, and renders a
    monitoring report.
    """
    
    def __init__(self, pipeline: "CompleteDiagnosisPipeline", 
                 monitoring_interval: int = 3600):
        """
        Initialize the deployment system.
        
        Args:
            pipeline: Diagnosis pipeline whose in-memory model is used
                when no model file is supplied to deploy_model().
            monitoring_interval (int): Monitoring interval in seconds.
                NOTE(review): stored but not used for scheduling here;
                callers drive monitor_performance() themselves.
        """
        self.pipeline = pipeline
        self.monitoring_interval = monitoring_interval
        self.performance_history: List[Dict[str, Any]] = []  # one record per monitoring round
        self.model = None          # set by deploy_model()
        self.is_deployed = False   # guards monitor_performance()
        
        # Per-instance logger namespaced under this module
        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
        
    def deploy_model(self, model_path: Optional[str] = None):
        """
        Deploy a model for monitoring.
        
        Args:
            model_path (str): Path to a saved Keras model. When omitted or
                missing on disk, the pipeline's in-memory model is used.
        
        Raises:
            Exception: re-raised after logging when deployment fails.
        """
        try:
            if model_path and os.path.exists(model_path):
                # Load a persisted model from disk
                import tensorflow as tf
                self.model = tf.keras.models.load_model(model_path)
                self.logger.info(f"模型从 {model_path} 部署成功")
            else:
                # Fall back to the model held by the pipeline
                self.model = self.pipeline.model
                self.logger.info("使用流水线中的模型进行部署")
            
            self.is_deployed = True
            self.logger.info("模型部署完成")
            
        except Exception as e:
            self.logger.error(f"部署模型时出错: {str(e)}")
            raise
    
    def monitor_performance(self, live_data: np.ndarray, true_labels: np.ndarray = None):
        """
        Run one monitoring round on live data.
        
        Args:
            live_data (np.array): Live input batch for the deployed model.
            true_labels (np.array): Ground-truth labels (optional); when
                given, accuracy/precision/recall/F1 are also computed.
        """
        if not self.is_deployed:
            self.logger.warning("模型尚未部署，无法进行监控")
            return
        
        try:
            current_metrics = self._evaluate_on_live_data(live_data, true_labels)
            
            # Append a timestamped record to the performance history
            performance_record = {
                'timestamp': pd.Timestamp.now(),
                'metrics': current_metrics,
                'data_shape': live_data.shape
            }
            self.performance_history.append(performance_record)
            
            # BUG FIX: 'accuracy' is absent when no labels were supplied, and
            # the previous code formatted the 'N/A' string fallback with :.4f,
            # raising ValueError and diverting every unlabeled round into the
            # error path (skipping the degradation check). Format only when
            # the value is numeric.
            accuracy = current_metrics.get('accuracy')
            accuracy_text = f"{accuracy:.4f}" if isinstance(accuracy, (int, float)) else 'N/A'
            self.logger.info(f"性能监控完成 - 准确率: {accuracy_text}")
            
            # Compare against the previous round and react to degradation
            if self._check_performance_degradation():
                self.logger.warning("检测到模型性能下降！")
                self._trigger_retraining()
                
        except Exception as e:
            self.logger.error(f"监控性能时出错: {str(e)}")
    
    def _evaluate_on_live_data(self, live_data: np.ndarray, 
                              true_labels: np.ndarray = None) -> Dict[str, float]:
        """Compute prediction metrics on a live batch.

        Always reports confidence statistics; adds supervised metrics when
        labels are provided. Returns {'error': msg} on failure.
        """
        try:
            # Model predictions (per-class probabilities)
            predictions = self.model.predict(live_data)
            predicted_classes = np.argmax(predictions, axis=1)
            
            # Unsupervised confidence statistics over the batch
            metrics = {
                'prediction_confidence': float(np.mean(np.max(predictions, axis=1))),
                'prediction_std': float(np.std(np.max(predictions, axis=1))),
                'data_size': len(live_data)
            }
            
            if true_labels is not None:
                from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
                # Accept either one-hot or integer-encoded labels
                true_classes = np.argmax(true_labels, axis=1) if true_labels.ndim > 1 else true_labels
                
                metrics.update({
                    'accuracy': accuracy_score(true_classes, predicted_classes),
                    'precision': precision_score(true_classes, predicted_classes, average='weighted'),
                    'recall': recall_score(true_classes, predicted_classes, average='weighted'),
                    'f1_score': f1_score(true_classes, predicted_classes, average='weighted')
                })
            
            return metrics
            
        except Exception as e:
            self.logger.error(f"评估实时数据时出错: {str(e)}")
            return {'error': str(e)}
    
    def _check_performance_degradation(self, threshold: float = 0.05) -> bool:
        """Return True if accuracy dropped by more than `threshold` since
        the previous monitoring round; False when unknown or insufficient
        history."""
        if len(self.performance_history) < 2:
            return False
        
        try:
            recent_metrics = self.performance_history[-1]['metrics']
            previous_metrics = self.performance_history[-2]['metrics']
            
            # Only compare when both rounds recorded an accuracy
            if 'accuracy' in recent_metrics and 'accuracy' in previous_metrics:
                recent_acc = recent_metrics['accuracy']
                previous_acc = previous_metrics['accuracy']
                return recent_acc < previous_acc - threshold
            
            return False
            
        except Exception as e:
            self.logger.error(f"检查性能下降时出错: {str(e)}")
            return False
    
    def _trigger_retraining(self):
        """Hook invoked on detected degradation; retraining logic
        (data collection, re-fit, redeploy) is not implemented yet."""
        self.logger.info("启动模型重新训练...")
        # TODO: collect fresh data, retrain the model, redeploy
    
    def generate_monitoring_report(self, save_path: Optional[str] = None) -> pd.DataFrame:
        """
        Generate a monitoring report from the performance history.
        
        Args:
            save_path (str): Optional CSV path to persist the report.
            
        Returns:
            pd.DataFrame: One row per monitoring round (empty on failure
            or when no history exists).
        """
        if not self.performance_history:
            self.logger.warning("没有性能历史数据，无法生成报告")
            return pd.DataFrame()
        
        try:
            # Flatten each record's metrics next to its timestamp
            report_data = []
            for record in self.performance_history:
                report_data.append({
                    'timestamp': record['timestamp'],
                    **record['metrics']
                })
            
            report_df = pd.DataFrame(report_data)
            
            # Visualize metric trends
            self._plot_monitoring_results(report_df)
            
            if save_path:
                report_df.to_csv(save_path, index=False)
                self.logger.info(f"监控报告已保存到 {save_path}")
            
            return report_df
            
        except Exception as e:
            self.logger.error(f"生成监控报告时出错: {str(e)}")
            return pd.DataFrame()
    
    def _plot_monitoring_results(self, report_df: pd.DataFrame):
        """Plot available metric time series and per-round batch sizes."""
        try:
            plt.figure(figsize=(15, 10))
            
            # Time series for whichever metrics the report contains
            metrics_to_plot = ['accuracy', 'precision', 'recall', 'f1_score', 'prediction_confidence']
            available_metrics = [m for m in metrics_to_plot if m in report_df.columns]
            
            if available_metrics:
                for i, metric in enumerate(available_metrics):
                    plt.subplot(2, 3, i+1)
                    plt.plot(report_df['timestamp'], report_df[metric], marker='o', linewidth=2)
                    plt.title(f'{metric} 时间序列')
                    plt.xlabel('时间')
                    plt.ylabel(metric)
                    plt.xticks(rotation=45)
                    plt.grid(True, alpha=0.3)
            
            # Bar chart of batch sizes per monitoring round
            if 'data_size' in report_df.columns:
                plt.subplot(2, 3, len(available_metrics)+1)
                plt.bar(range(len(report_df)), report_df['data_size'], alpha=0.7)
                plt.title('数据量统计')
                plt.xlabel('监控次数')
                plt.ylabel('数据量')
                plt.grid(True, alpha=0.3)
            
            plt.tight_layout()
            plt.show()
            
        except Exception as e:
            self.logger.error(f"绘制监控结果时出错: {str(e)}")


class PerformanceEvaluator:
    """Scores the output of a completed diagnosis pipeline run.

    Combines diagnosis accuracy, feature quality, model performance and
    explainability quality into one overall score, and renders a markdown
    evaluation report from the cached results.
    """
    
    def __init__(self):
        """Create an evaluator with an empty result cache."""
        self.evaluation_results = {}
        self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
    
    def evaluate_pipeline(self, pipeline_results: Dict[str, Any]) -> Dict[str, Any]:
        """
        Evaluate pipeline performance.
        
        Args:
            pipeline_results (dict): Output of the diagnosis pipeline run.
            
        Returns:
            dict: Per-aspect scores plus an 'overall_score' aggregate;
            empty dict when evaluation fails.
        """
        try:
            self.logger.info("开始评估流水线性能...")
            
            evaluation = {
                'diagnosis_accuracy': self._evaluate_diagnosis_accuracy(pipeline_results),
                'feature_quality': self._evaluate_feature_quality(pipeline_results),
                'model_performance': self._evaluate_model_performance(pipeline_results),
                'explainability_quality': self._evaluate_explainability_quality(pipeline_results),
                'overall_score': 0.0
            }
            
            # Overall score = mean of strictly positive numeric aspect scores
            # (the 0.0 placeholder above never contributes to its own mean)
            positive_scores = [
                score for score in evaluation.values()
                if isinstance(score, (int, float)) and score > 0
            ]
            if positive_scores:
                evaluation['overall_score'] = np.mean(positive_scores)
            
            self.evaluation_results = evaluation
            self.logger.info(f"流水线性能评估完成 - 总体评分: {evaluation['overall_score']:.4f}")
            
            return evaluation
            
        except Exception as e:
            self.logger.error(f"评估流水线性能时出错: {str(e)}")
            return {}
    
    def _evaluate_diagnosis_accuracy(self, results: Dict[str, Any]) -> float:
        """Return the diagnosis accuracy, or 0.0 when results are absent."""
        try:
            return results['diagnosis_results'].get('accuracy', 0.0)
        except KeyError:
            return 0.0
    
    def _evaluate_feature_quality(self, results: Dict[str, Any]) -> float:
        """Return a feature-quality score (fixed placeholder for now)."""
        # TODO: replace with a real feature-quality metric
        return 0.8
    
    def _evaluate_model_performance(self, results: Dict[str, Any]) -> float:
        """Return the best validation accuracy seen in training, or 0.0."""
        trainer = results.get('model_trainer')
        if trainer is None or not hasattr(trainer, 'history'):
            return 0.0
        history = trainer.history
        if history and 'val_accuracy' in history.history:
            return max(history.history['val_accuracy'])
        return 0.0
    
    def _evaluate_explainability_quality(self, results: Dict[str, Any]) -> float:
        """Return an explainability-quality score (fixed placeholder)."""
        # TODO: replace with a real explainability-quality metric
        return 0.7
    
    def generate_evaluation_report(self, save_path: str = None) -> str:
        """
        Generate an evaluation report from the cached results.
        
        Args:
            save_path (str): Optional path to persist the markdown report.
            
        Returns:
            str: Report text (empty string on failure).
        """
        try:
            report = f"""
# 高速列车轴承智能故障诊断系统 - 性能评估报告

## 评估时间
{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

## 评估结果

### 诊断准确率
- 准确率: {self.evaluation_results.get('diagnosis_accuracy', 0.0):.4f}

### 特征质量
- 质量评分: {self.evaluation_results.get('feature_quality', 0.0):.4f}

### 模型性能
- 性能评分: {self.evaluation_results.get('model_performance', 0.0):.4f}

### 可解释性质量
- 质量评分: {self.evaluation_results.get('explainability_quality', 0.0):.4f}

### 总体评分
- 综合评分: {self.evaluation_results.get('overall_score', 0.0):.4f}

## 建议
- 如果准确率低于0.8，建议增加训练数据或调整模型参数
- 如果可解释性质量较低，建议增加更多可解释性分析方法
- 定期监控模型性能，及时进行模型更新

---
报告生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
            """
            
            if save_path:
                with open(save_path, 'w', encoding='utf-8') as report_file:
                    report_file.write(report)
                self.logger.info(f"评估报告已保存到 {save_path}")
            
            return report
            
        except Exception as e:
            self.logger.error(f"生成评估报告时出错: {str(e)}")
            return ""


def main():
    """Demo entry point: build, run, evaluate, deploy and monitor."""
    # Pipeline configuration
    settings = {
        'data_path': 'path/to/your/data',
        'model_type': 'hybrid',
        'input_shape': (30, 1),
        'num_classes': 4,
        'feature_names': [f'feature_{i}' for i in range(30)],
        'class_names': ['正常', '外圈故障', '内圈故障', '滚动体故障'],
        'target_fs': 12000
    }
    
    # Build the full diagnosis pipeline
    diagnosis = CompleteDiagnosisPipeline(settings)
    diagnosis.build_pipeline()
    
    # Simulated source/target file lists
    src_files = [f'source_{i}.mat' for i in range(50)]
    tgt_files = [f'target_{i}.mat' for i in range(20)]
    
    print("运行完整诊断流水线...")
    outcome = diagnosis.run_pipeline(src_files, tgt_files)
    
    # Score the run and write the markdown report
    scorer = PerformanceEvaluator()
    scorer.evaluate_pipeline(outcome)
    scorer.generate_evaluation_report('evaluation_report.md')
    print("评估报告已生成")
    
    # Deploy the trained model
    deployer = DeploymentSystem(diagnosis)
    deployer.deploy_model()
    
    # Simulate five labeled monitoring rounds
    print("开始性能监控...")
    for _ in range(5):
        batch = np.random.randn(10, 30, 1)
        label_ids = np.random.randint(0, 4, 10)
        deployer.monitor_performance(batch, np.eye(4)[label_ids])
        time.sleep(1)
    
    # Persist the monitoring report
    deployer.generate_monitoring_report('monitoring_report.csv')
    print("监控报告已生成")
    
    print("完整流水线和部署系统演示完成！")


if __name__ == "__main__":
    main()
