import os
# Disable oneDNN custom ops; must be set before TensorFlow is imported.
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
# Silence TensorFlow's C++ logging; must also be set before importing TF.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # 0=all messages, 1=hide INFO, 2=hide WARNING, 3=errors only

import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ReduceLROnPlateau
from scipy.stats import gaussian_kde
import tensorflow as tf

# Silence TensorFlow's Python-side logger as well.
import logging
tf.get_logger().setLevel(logging.ERROR)
# Quiet AutoGraph conversion warnings (e.g. CUDA-related notices).
tf.autograph.set_verbosity(0)

# No-op stream used as a replacement when redirecting stdout/stderr
class DummyFile:
    """Write-only sink that silently discards everything (like os.devnull)."""

    def write(self, x):
        pass

    def flush(self):
        pass

# At startup, redirect stdout/stderr to no-op sinks so later writes cannot
# fail when the program runs without a console (e.g. a windowed/frozen app).
# If the redirection itself fails, keep running with the original streams.
try:
    # Keep references to the real streams so they can be restored later.
    orig_stdout = sys.stdout
    orig_stderr = sys.stderr
    # Swap in the no-op sinks.
    sys.stdout = DummyFile()
    sys.stderr = DummyFile()
except Exception:
    # Best effort only; a bare `except:` would also swallow
    # SystemExit/KeyboardInterrupt, so catch Exception explicitly.
    pass

# Print helper that never touches the console; messages go only through a callback
def safe_print(message, callback=None):
    """Deliver *message* through *callback* without ever using stdout.

    When no callback is supplied the message is dropped entirely: printing
    is deliberately avoided because the process may have no usable console.
    """
    if callback is None:
        # Nothing to deliver to — silently discard.
        return
    callback(message)

# Global font: Microsoft YaHei so Chinese labels render (font must be installed).
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with a CJK font

class AEAnalyzer:
    """Autoencoder (AE) based fault-detection toolkit.

    Pipeline: IQR outlier handling -> mean imputation -> standardization ->
    PCA keeping 95% of variance -> dense autoencoder -> per-sample RE²/SPE
    reconstruction statistics with KDE-derived control limits, plus plotting
    helpers for the correlation matrix and monitoring charts.

    All progress/diagnostic messages go through ``safe_print`` with
    ``print_callback`` so the class never writes to stdout (the process may
    run without a console).
    """

    def __init__(self, print_callback=None):
        # Fitted inside preprocess_data() on the supplied data.
        self.scaler = StandardScaler()
        # Retain enough principal components to explain 95% of the variance.
        self.pca = PCA(n_components=0.95)
        self.imputer = SimpleImputer(strategy='mean')
        self.autoencoder = None   # created by build_model()
        self.n_features = None    # AE input width, set by build_model()
        self.print_callback = print_callback  # message sink for safe_print()

    def preprocess_data(self, data, remove_outliers=True):
        """Clean and reduce *data* (a pandas DataFrame of numeric columns).

        Steps: optional IQR-based outlier replacement with column means,
        mean imputation of missing values, standardization, then PCA.

        Returns:
            ``(X_pca, correlation_matrix)`` on success, ``(None, None)`` on
            failure. The correlation matrix is computed on the standardized
            data *before* PCA.
        """
        try:
            if remove_outliers:
                # Tukey's fences: values outside Q1-1.5*IQR / Q3+1.5*IQR
                # are treated as outliers.
                Q1 = data.quantile(0.25)
                Q3 = data.quantile(0.75)
                IQR = Q3 - Q1

                lower_bound = Q1 - 1.5 * IQR
                upper_bound = Q3 + 1.5 * IQR

                outlier_mask = (data < lower_bound) | (data > upper_bound)

                # Cell-level and row-level outlier counts (for reporting).
                outlier_count = outlier_mask.sum().sum()
                outlier_rows = outlier_mask.any(axis=1).sum()

                # Replace outliers with the per-column mean.
                # NOTE(review): the fill uses data.mean() computed on the
                # *uncleaned* data, so the means still include the outliers
                # themselves — confirm this is intended.
                data_cleaned = data.copy()
                data_cleaned[outlier_mask] = np.nan
                data_cleaned = data_cleaned.fillna(data.mean())

                safe_print(f"检测到的异常值总数: {outlier_count}", self.print_callback)
                safe_print(f"包含异常值的行数: {outlier_rows}", self.print_callback)

                data = data_cleaned

            # Mean-impute any remaining missing values.
            X_imputed = self.imputer.fit_transform(data)

            # Zero-mean / unit-variance scaling.
            X_scaled = self.scaler.fit_transform(X_imputed)

            # Variable-by-variable correlation matrix (np.corrcoef expects
            # variables as rows, hence the transpose).
            correlation_matrix = np.corrcoef(X_scaled.T)

            # Dimensionality reduction.
            X_pca = self.pca.fit_transform(X_scaled)
            safe_print(f"总共有 {data.shape[1]} 个变量", self.print_callback)
            safe_print(f"降维后特征数: {X_pca.shape[1]}", self.print_callback)

            return X_pca, correlation_matrix
        except Exception as e:
            safe_print(f"数据预处理失败: {str(e)}", self.print_callback)
            return None, None

    def build_model(self, n_features):
        """Build and compile a symmetric dense autoencoder.

        Architecture: n_features -> 64 -> 32 -> 16 (bottleneck) -> 32 ->
        64 -> n_features, with dropout for regularization and a linear
        output so reconstructions live in the standardized input space.
        Returns the compiled model (also stored on ``self.autoencoder``).
        """
        self.n_features = n_features

        inputs = Input(shape=(n_features,))

        # Encoder.
        encoded = Dense(64, activation='relu')(inputs)
        encoded = Dropout(0.2)(encoded)
        encoded = Dense(32, activation='relu')(encoded)
        encoded = Dense(16, activation='relu', name='bottleneck')(encoded)

        # Decoder (mirror of the encoder); linear output matches input scale.
        decoded = Dense(32, activation='relu')(encoded)
        decoded = Dropout(0.2)(decoded)
        decoded = Dense(64, activation='relu')(decoded)
        decoded = Dense(n_features, activation='linear')(decoded)

        self.autoencoder = Model(inputs, decoded)
        self.autoencoder.compile(optimizer=Adam(learning_rate=0.001), loss='mse')

        return self.autoencoder

    def train_model(self, X_train, epochs=150, batch_size=128, validation_split=0.1, callbacks=None):
        """Train the autoencoder to reconstruct ``X_train``.

        Returns the Keras ``History`` object, or ``None`` if the model has
        not been built yet. Training is silent (verbose=0); progress should
        be surfaced through user-supplied ``callbacks``.
        """
        if self.autoencoder is None:
            safe_print("请先构建模型", self.print_callback)
            return None

        # Halve the learning rate when validation loss plateaus.
        default_callbacks = [
            ReduceLROnPlateau(monitor='val_loss',
                              factor=0.5,
                              patience=10,
                              min_lr=1e-6,
                              verbose=0)  # keep the console quiet
        ]

        # Append any caller-supplied callbacks (e.g. progress reporters).
        if callbacks:
            default_callbacks.extend(callbacks)

        history = self.autoencoder.fit(X_train, X_train,
                                       epochs=epochs,
                                       batch_size=batch_size,
                                       validation_split=validation_split,
                                       verbose=0,  # no console output
                                       callbacks=default_callbacks)
        return history

    def _predict_silently(self, data):
        """Run ``self.autoencoder.predict`` with stdout/stderr suppressed.

        TensorFlow may emit log text even with ``verbose=0``; when the
        process has no console those writes can raise, so both streams are
        temporarily swapped for ``DummyFile``. If the first attempt fails,
        one retry is made; as a last resort an all-zero array is returned
        so callers can still compute (degenerate) statistics instead of
        crashing.
        """
        saved_stdout = sys.stdout
        saved_stderr = sys.stderr
        try:
            sys.stdout = DummyFile()
            sys.stderr = DummyFile()
            return self.autoencoder.predict(data, verbose=0)
        except Exception:
            # Retry once; swallow a second failure with a zero fallback.
            try:
                return self.autoencoder.predict(data, verbose=0)
            except Exception:
                return np.zeros_like(data)
        finally:
            # Always restore the original streams.
            sys.stdout = saved_stdout
            sys.stderr = saved_stderr

    def calculate_re2(self, data):
        """Return the RE² statistic per sample.

        RE² is the *maximum* squared reconstruction error across features,
        i.e. the worst-reconstructed variable of each sample.
        """
        reconstructions = self._predict_silently(data)
        errors = np.square(data - reconstructions)
        return np.max(errors, axis=1)

    def calculate_spe(self, data):
        """Return the squared prediction error (SPE / Q statistic) per sample.

        SPE is the *sum* of squared reconstruction residuals across features.
        """
        reconstructions = self._predict_silently(data)
        residuals = data - reconstructions
        return np.sum(np.square(residuals), axis=1)

    def calculate_control_limit(self, statistic, confidence_level=0.99):
        """Estimate a control limit for *statistic* via Gaussian KDE.

        Returns the smallest grid point whose KDE-based empirical CDF
        reaches ``confidence_level``. The grid spans the observed range of
        the statistic, so the result lies within [min, max].
        """
        statistic = np.ravel(statistic)
        kde = gaussian_kde(statistic)
        # Evaluate the density on a fine grid and integrate numerically to
        # a normalized CDF.
        x = np.linspace(min(statistic), max(statistic), 1000)
        cdf = np.cumsum(kde(x)) / np.sum(kde(x))
        return x[np.where(cdf >= confidence_level)[0][0]]

    def plot_correlation_matrix(self, data):
        """Draw a heatmap of the variable correlation matrix.

        Returns the matplotlib Figure, or ``None`` on failure (the error is
        reported through the callback).
        """
        try:
            # Drop any figures left over from previous calls.
            plt.clf()
            plt.close('all')

            safe_print("正在计算相关性矩阵...", self.print_callback)
            correlation_matrix = np.corrcoef(data.T)

            safe_print("正在生成热图...", self.print_callback)
            plt.figure(figsize=(12, 10), dpi=120)

            # Diverging palette so zero correlation maps to white.
            cmap = sns.diverging_palette(230, 20, as_cmap=True)

            sns.heatmap(correlation_matrix,
                        annot=False,      # cell values unreadable with many variables
                        cmap=cmap,
                        linewidths=0.5,
                        square=True,
                        vmin=-1, vmax=1,  # fixed color scale
                        center=0)         # white at zero correlation

            plt.title('变量相关性矩阵', fontsize=18, pad=20, fontweight='bold')
            plt.xlabel('变量', fontsize=14, labelpad=10)
            plt.ylabel('变量', fontsize=14, labelpad=10)

            # Label the colorbar (the last axes seaborn added to the figure).
            cbar = plt.gcf().axes[-1]
            cbar.set_ylabel('相关系数', fontsize=12, rotation=270, labelpad=20)

            plt.tight_layout()

            safe_print("相关性矩阵绘制完成", self.print_callback)
            return plt.gcf()

        except Exception as e:
            safe_print(f"绘制相关性矩阵时出错: {str(e)}", self.print_callback)
            return None

    def plot_monitoring_charts(self, train_stats, test_stats, control_limit, stat_name):
        """Draw a two-panel monitoring figure for one statistic.

        Left panel: train/test distributions with the control limit.
        Right panel: run chart of the test samples with the control limit
        and flagged out-of-limit points. Returns the matplotlib Figure.
        """
        plt.clf()
        plt.close('all')

        plt.rcParams.update({
            'font.size': 12,
            'axes.titlesize': 16,
            'axes.labelsize': 14,
            'xtick.labelsize': 12,
            'ytick.labelsize': 12
        })

        fig = plt.figure(figsize=(16, 8), dpi=120)

        # --- Panel 1: distribution histogram ---
        plt.subplot(1, 2, 1)

        # ~5 samples per bin, capped at 50; at least 1 so seaborn does not
        # fail on very small training sets (previously bins could be 0).
        bin_count = max(1, min(50, int(len(train_stats) / 5)))

        sns.histplot(train_stats, bins=bin_count, alpha=0.6, label='训练集',
                     color='#6495ED', kde=True, stat='density')
        sns.histplot(test_stats, bins=bin_count, alpha=0.6, label='测试集',
                     color='#66CDAA', kde=True, stat='density')

        plt.axvline(control_limit, color='#FF4500', linestyle='--', linewidth=2, label='控制限')
        # Shade the out-of-control region to the right of the limit.
        max_val = max(np.max(train_stats), np.max(test_stats)) * 1.1
        plt.fill_between([control_limit, max_val], 0, 1, color='#FFCCCB', alpha=0.3,
                         transform=plt.gca().get_xaxis_transform())

        plt.title(f'{stat_name}分布', fontsize=16, fontweight='bold')
        plt.xlabel(f'{stat_name}值', fontsize=14)
        plt.ylabel('密度', fontsize=14)
        plt.legend(fontsize=12, loc='upper right')
        plt.grid(True, linestyle='--', alpha=0.3)

        # --- Panel 2: run chart of the test statistic ---
        plt.subplot(1, 2, 2)

        plt.plot(test_stats, label=stat_name, color='#4169E1', linewidth=1.5, alpha=0.8)

        plt.axhline(control_limit, color='#FF4500', linestyle='--', linewidth=2, label='控制限')
        plt.fill_between(range(len(test_stats)), control_limit,
                         max(control_limit, np.max(test_stats) * 1.1),
                         color='#FFCCCB', alpha=0.2)

        # Highlight samples exceeding the control limit.
        anomalies = test_stats > control_limit
        if np.any(anomalies):
            anomaly_indices = np.where(anomalies)[0]
            plt.scatter(anomaly_indices, test_stats[anomaly_indices],
                        color='#FF0000', label=f'异常点({len(anomaly_indices)}个)',
                        zorder=5, s=60, edgecolor='black')

            # Annotate only the first 10 anomalies to avoid clutter.
            for i, idx in enumerate(anomaly_indices):
                if i < 10:
                    plt.annotate(f'{test_stats[idx]:.2f}',
                                 (idx, test_stats[idx]),
                                 textcoords="offset points",
                                 xytext=(0, 10),
                                 ha='center',
                                 fontsize=9,
                                 bbox=dict(boxstyle="round,pad=0.3", fc="white", ec="gray", alpha=0.8))

        plt.title(f'{stat_name}监控图', fontsize=16, fontweight='bold')
        plt.xlabel('样本编号', fontsize=14)
        plt.ylabel(f'{stat_name}值', fontsize=14)
        plt.legend(fontsize=12, loc='upper right')
        plt.grid(True, linestyle='--', alpha=0.3)

        # Lighten the frame on both panels.
        for ax in fig.axes:
            ax.spines['top'].set_visible(False)
            ax.spines['right'].set_visible(False)
            ax.spines['left'].set_linewidth(1.2)
            ax.spines['bottom'].set_linewidth(1.2)

        plt.tight_layout(pad=3.0)

        # Footer watermark.
        fig.text(0.5, 0.01, '自动编码器故障检测',
                 fontsize=12, color='gray', alpha=0.5,
                 ha='center', va='bottom')

        return fig

def run_fault_detection(file_path, progress_callback=None, epochs=150, remove_outliers=True):
    """Run the complete AE fault-detection pipeline on an Excel file.

    Reads the data, preprocesses it (optional IQR outlier handling, mean
    imputation, scaling, PCA), trains an autoencoder on an 80% split, then
    computes RE² and SPE statistics with KDE control limits on the held-out
    20% and flags out-of-limit samples.

    Args:
        file_path: path to an Excel file of numeric process data.
        progress_callback: optional callable(str) that receives all
            progress and result messages (nothing is printed to stdout).
        epochs: number of training epochs.
        remove_outliers: whether to apply IQR outlier replacement.

    Returns:
        A dict with the analyzer, data splits, statistics, control limits,
        anomaly summaries, the loss curve and the Keras history — or
        ``None`` on any failure (the error is reported via the callback).
    """
    def _notify(message):
        # Single guarded reporting point so every step can report
        # unconditionally.
        if progress_callback:
            progress_callback(message)

    try:
        _notify("开始读取数据")
        data = pd.read_excel(file_path)

        _notify("创建分析器实例")
        analyzer = AEAnalyzer(print_callback=progress_callback)

        _notify("数据预处理中")
        X_processed, correlation_matrix = analyzer.preprocess_data(data, remove_outliers)
        if X_processed is None:
            _notify("数据预处理失败")
            return None

        _notify("划分训练集和测试集")
        # Fixed seed for a reproducible 80/20 split.
        X_train, X_test = train_test_split(X_processed, test_size=0.2, random_state=42)

        _notify("构建模型")
        analyzer.build_model(X_train.shape[1])

        _notify("开始训练模型")

        class ProgressCallback(tf.keras.callbacks.Callback):
            """Keras callback forwarding per-epoch loss to the UI callback."""

            def __init__(self, progress_callback):
                super().__init__()
                self.progress_callback = progress_callback
                self.train_losses = []  # loss curve, returned to the caller

            def on_epoch_end(self, epoch, logs=None):
                if self.progress_callback:
                    logs = logs or {}  # logs may be None depending on Keras version
                    total_epochs = self.params['epochs']
                    loss = logs.get('loss', float('nan'))
                    self.train_losses.append(loss)
                    self.progress_callback(f"训练轮次 {epoch + 1}/{total_epochs}, 损失: {loss:.4f}")

        progress_cb = ProgressCallback(progress_callback)
        history = analyzer.train_model(X_train, epochs=epochs, callbacks=[progress_cb])

        _notify("计算重构误差")
        # RE² statistic; control limit estimated from the training data.
        re2_train = analyzer.calculate_re2(X_train)
        re2_test = analyzer.calculate_re2(X_test)
        re2_control_limit = analyzer.calculate_control_limit(re2_train)

        # SPE statistic, same procedure.
        spe_train = analyzer.calculate_spe(X_train)
        spe_test = analyzer.calculate_spe(X_test)
        spe_control_limit = analyzer.calculate_control_limit(spe_train)

        # Flag test samples above the control limits.
        re2_anomalies_mask = re2_test > re2_control_limit
        spe_anomalies_mask = spe_test > spe_control_limit

        re2_anomalies = {
            'mask': re2_anomalies_mask,
            'count': np.sum(re2_anomalies_mask),
            'percentage': (np.sum(re2_anomalies_mask) / len(re2_test)) * 100
        }

        spe_anomalies = {
            'mask': spe_anomalies_mask,
            'count': np.sum(spe_anomalies_mask),
            'percentage': (np.sum(spe_anomalies_mask) / len(spe_test)) * 100
        }

        _notify("生成检测结果")
        # Human-readable result summary, delivered via the callback.
        result_text = "\nRE²检测结果:\n"
        result_text += f"RE²异常样本数: {re2_anomalies['count']}\n"
        result_text += f"RE²异常数据的索引: {np.where(re2_anomalies['mask'])[0]}\n"
        result_text += f"RE²控制限（RE²α）: {re2_control_limit:.4f}\n\n"

        result_text += "SPE检测结果:\n"
        result_text += f"SPE异常样本数: {spe_anomalies['count']}\n"
        result_text += f"SPE异常数据的索引: {np.where(spe_anomalies['mask'])[0]}\n"
        result_text += f"SPE控制限（阈值）: {spe_control_limit:.4f}"

        _notify(result_text)
        _notify("分析完成！")

        return {
            'analyzer': analyzer,
            'X_train': X_train,
            'X_test': X_test,
            're2_train': re2_train,
            're2_test': re2_test,
            'spe_train': spe_train,
            'spe_test': spe_test,
            're2_control_limit': re2_control_limit,
            'spe_control_limit': spe_control_limit,
            'correlation_matrix': correlation_matrix,
            'train_losses': progress_cb.train_losses,
            'history': history,
            're2_anomalies': re2_anomalies,
            'spe_anomalies': spe_anomalies
        }

    except Exception as e:
        # Boundary handler: report through the callback (never print) and
        # return None so a GUI caller can show the failure without crashing.
        error_msg = f"故障检测过程出错: {str(e)}"
        if progress_callback:
            progress_callback(error_msg)
        return None

def run_ae_re2_analysis(file_path, remove_outliers=True, progress_callback=None):
    """AE analysis entry point for the RE² statistic.

    Emits the section banner through the callback, then delegates to the
    full detection pipeline (which computes RE² alongside SPE).
    """
    banner = "\n===== 使用RE²统计量 ====="
    safe_print(banner, progress_callback)
    return run_fault_detection(
        file_path, progress_callback, remove_outliers=remove_outliers
    )

def run_ae_spe_analysis(file_path, remove_outliers=True, progress_callback=None):
    """AE analysis entry point for the SPE statistic.

    Emits the section banner through the callback, then delegates to the
    full detection pipeline. No SPE-specific work is needed here:
    run_fault_detection already computes both RE² and SPE (the previous
    TODO was stale).
    """
    safe_print("\n===== 使用SPE统计量 =====", progress_callback)
    return run_fault_detection(file_path, progress_callback, remove_outliers=remove_outliers)

if __name__ == "__main__":
    # Example run: route all progress messages to a log file, since stdout
    # is redirected to a no-op sink at module import time.
    file_path = 'data/正常数据.xlsx'

    def example_callback(message):
        # Append every progress/result message to a log file; works even
        # when the program has no attached console.
        with open("ae_analysis_log.txt", "a", encoding="utf-8") as f:
            f.write(message + "\n")

    # RE² analysis. The entry point writes its own section banner through
    # the callback, so no duplicate banner is logged here.
    re2_results = run_ae_re2_analysis(file_path, progress_callback=example_callback)

    # SPE analysis (same pipeline, SPE banner).
    spe_results = run_ae_spe_analysis(file_path, progress_callback=example_callback)