import os
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader, TensorDataset
from scipy.stats import gaussian_kde


# ==========================
# 1. 读取 TE 数据集
# ==========================
def load_te_data():
    """
    Load the Tennessee Eastman (TE) benchmark data and standardize it.

    - Only the normal-operation file d00.dat is used for training.
    - d00_te.dat supplies the normal test set; d01_te.dat .. d21_te.dat
      supply the 21 faulty test sets.

    Returns:
        (train, normal_test, fault_tests): float32 tensors scaled with a
        StandardScaler fitted on the normal training data only; fault_tests
        is a list of 21 tensors, one per fault class.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    data_dir = os.path.join(current_dir, "data")

    # Normal-operation data files.
    normal_file = os.path.join(data_dir, "d00.dat")
    normal_test_file = os.path.join(data_dir, "d00_te.dat")

    # Fault test files d01_te.dat .. d21_te.dat.
    fault_test_files = [os.path.join(data_dir, f"d{i:02d}_te.dat") for i in range(1, 22)]

    # Raw strings for the regex separator: "\s" is an invalid escape in a
    # plain string and warns on Python 3.12+.
    normal_train = pd.read_csv(normal_file, sep=r"\s+", header=None).values
    normal_test = pd.read_csv(normal_test_file, sep=r"\s+", header=None).values

    # NOTE(review): in the original Braatz distribution d00.dat is stored
    # transposed (52 rows x 500 columns) relative to the other files —
    # verify the orientation of the local copy, otherwise this slice keeps
    # only 52 of the intended 500 training rows.
    normal_train = normal_train[:500, :52]  # normal training set
    normal_test = normal_test[:, :52]  # normal test set

    # All fault data sets, restricted to the 52 process variables.
    fault_test = [pd.read_csv(f, sep=r"\s+", header=None).values[:, :52] for f in fault_test_files]

    # Standardize with statistics fitted on normal training data only, so
    # test-time reconstruction errors are comparable to training errors.
    scaler = StandardScaler()
    train_data = scaler.fit_transform(normal_train)
    normal_test = scaler.transform(normal_test)
    fault_test = [scaler.transform(ft) for ft in fault_test]

    return (torch.tensor(train_data, dtype=torch.float32),
            torch.tensor(normal_test, dtype=torch.float32),
            [torch.tensor(ft, dtype=torch.float32) for ft in fault_test])


# ==========================
# 2. 图神经网络组件
# ==========================
class GraphConvolution(nn.Module):
    """
    Single graph-convolution layer: X' = A X W + b.

    Models spatial dependencies between sensor nodes; the (already
    normalized) adjacency matrix A is supplied by the caller at forward time.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        # Weight laid out as (in_features, out_features) so features are
        # projected with a plain right-multiplication.
        self.weight = nn.Parameter(torch.empty(in_features, out_features))
        self.bias = nn.Parameter(torch.empty(out_features))
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-uniform weights, zero bias."""
        nn.init.kaiming_uniform_(self.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x, adj):
        """
        Args:
            x:   node features, [batch_size, num_nodes, in_features]
            adj: adjacency matrix, [num_nodes, num_nodes]

        Returns:
            [batch_size, num_nodes, out_features]
        """
        projected = torch.matmul(x, self.weight)
        # Broadcast the single adjacency matrix over the batch dimension.
        aggregated = torch.matmul(adj.unsqueeze(0), projected)
        return aggregated + self.bias


# ==========================
# 3. 多头自注意力机制
# ==========================
class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention: queries, keys and values all come from the same input."""

    def __init__(self, embed_dim, num_heads):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)

    def forward(self, x):
        # nn.MultiheadAttention returns (output, attention_weights);
        # only the attended output is needed here.
        attended, _ = self.attn(x, x, x)
        return attended


# ==========================
# 4. Transformer块
# ==========================
class TransformerBlock(nn.Module):
    """
    Post-norm Transformer encoder block: self-attention followed by a
    position-wise feed-forward network, each wrapped in a residual
    connection and LayerNorm.
    """

    def __init__(self, embed_dim, num_heads, hidden_dim, dropout=0.1):
        super().__init__()
        # Submodule creation order matters for RNG reproducibility.
        self.attn = MultiHeadSelfAttention(embed_dim, num_heads)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.ff = nn.Sequential(
            nn.Linear(embed_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, embed_dim),
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Residual + LayerNorm around the attention sublayer...
        attn_out = self.attn(x)
        x = self.norm1(x + self.dropout(attn_out))
        # ...and around the feed-forward sublayer.
        ff_out = self.ff(x)
        return self.norm2(x + self.dropout(ff_out))


# ==========================
# 5. RAE-GNN-Transformer模型
# ==========================
class RAE_GNN_Transformer(nn.Module):
    """
    Residual autoencoder with a GNN bottleneck and Transformer stages.

    Pipeline: linear embedding -> Transformer encoder stack -> latent vector
    -> latent broadcast to one feature vector per sensor node -> two graph
    convolutions plus a linear residual branch -> Transformer decoder stack
    -> linear layer reconstructing the original input.
    """
    def __init__(self, input_dim, embed_dim=16, num_heads=2, num_layers=4, 
                 hidden_dim=32, latent_dim=8, dropout=0.1):
        super().__init__()
        self.input_dim = input_dim
        self.latent_dim = latent_dim
        
        # Encoder: residual autoencoder (RAE) built from Transformer blocks.
        self.embedding = nn.Linear(input_dim, embed_dim)
        self.encoder_layers = nn.Sequential(
            *[TransformerBlock(embed_dim, num_heads, hidden_dim, dropout) for _ in range(num_layers)]
        )
        self.encoder_out = nn.Linear(embed_dim, latent_dim)
        
        # Graph neural network (GNN) stage.
        # The adjacency matrix stands in for sensor-to-sensor correlations.
        # NOTE(review): _create_adjacency_matrix uses torch.rand, so every
        # model instance gets a different random graph — consider seeding or
        # deriving the adjacency from actual data correlations for
        # reproducibility.
        self.register_buffer('adj_matrix', self._create_adjacency_matrix(input_dim))
        self.gcn1 = GraphConvolution(latent_dim, latent_dim)
        self.gcn2 = GraphConvolution(latent_dim, latent_dim)
        self.gcn_activation = nn.ReLU()
        
        # Linear residual branch that bypasses the two GCN layers.
        self.residual = nn.Linear(latent_dim, latent_dim)
        
        # Decoder: a fixed stack of 3 Transformer blocks over the node features.
        self.decoder_layers = nn.Sequential(
            *[TransformerBlock(latent_dim, num_heads, hidden_dim, dropout) for _ in range(3)]
        )
        # Final projection from all flattened node features back to the input.
        self.decoder = nn.Linear(self.input_dim * latent_dim, input_dim)

    def _create_adjacency_matrix(self, num_nodes, threshold=0.5):
        """
        Build a random symmetric adjacency matrix with self-loops, then apply
        symmetric normalization (D^(-1/2) * A * D^(-1/2)).

        In a real deployment this should come from domain knowledge or a
        data-driven correlation analysis rather than random sampling.
        """
        # Base adjacency with self-loops (identity diagonal).
        adj_matrix = torch.eye(num_nodes)
        
        # Add random connections to simulate sensor correlations.
        # NOTE(review): the double loop visits each unordered pair (i, j)
        # twice, so the effective edge probability is 1-(1-threshold)^2,
        # not `threshold` itself.
        for i in range(num_nodes):
            for j in range(num_nodes):
                if i != j and torch.rand(1).item() < threshold:
                    adj_matrix[i, j] = 1.0
                    adj_matrix[j, i] = 1.0  # keep the matrix symmetric
        
        # Symmetric normalization: D^(-1/2) * A * D^(-1/2).
        degree = torch.sum(adj_matrix, dim=1)
        degree_inv_sqrt = torch.pow(degree, -0.5)
        # Safety net for isolated nodes; with self-loops degree >= 1, so
        # this branch should never actually trigger here.
        degree_inv_sqrt[degree_inv_sqrt == float('inf')] = 0
        degree_inv_sqrt_matrix = torch.diag(degree_inv_sqrt)
        
        normalized_adj = torch.mm(torch.mm(degree_inv_sqrt_matrix, adj_matrix), degree_inv_sqrt_matrix)
        return normalized_adj

    def forward(self, x):
        """
        Reconstruct the input.

        Args:
            x: [batch_size, input_dim] standardized sensor readings.

        Returns:
            [batch_size, input_dim] reconstruction of x.
        """
        batch_size = x.size(0)
        
        # Encoding: extract features with the Transformer encoder.
        x_embedded = self.embedding(x)  # [batch_size, input_dim] -> [batch_size, embed_dim]
        
        # Add a length-1 sequence dimension for the Transformer blocks.
        x_embedded = x_embedded.unsqueeze(1)  # [batch_size, 1, embed_dim]
        encoded = self.encoder_layers(x_embedded)  # [batch_size, 1, embed_dim]
        latent = self.encoder_out(encoded)  # [batch_size, 1, latent_dim]
        
        # Drop the sequence dimension again.
        latent = latent.squeeze(1)  # [batch_size, latent_dim]
        
        # Node feature matrix: every node starts from the same latent vector.
        node_features = latent.unsqueeze(1).expand(-1, self.input_dim, -1)  # [batch_size, input_dim, latent_dim]
        
        # Graph convolutions capture spatial dependencies between sensors.
        gcn_out = self.gcn_activation(self.gcn1(node_features, self.adj_matrix))
        gcn_out = self.gcn2(gcn_out, self.adj_matrix)
        
        # Residual connection around the GCN stage.
        residual_out = self.residual(node_features)
        enhanced_features = gcn_out + residual_out  # residual connection
        
        # Decoding: Transformer blocks over the enhanced node features.
        decoded = self.decoder_layers(enhanced_features)
        
        # Final output layer: flatten all node features and reconstruct
        # the original input vector.
        decoded_flat = decoded.view(batch_size, self.input_dim * self.latent_dim)
        reconstructed = self.decoder(decoded_flat)  # [batch_size, input_dim]
        
        return reconstructed


# ==========================
# 6. 训练模型
# ==========================
def train_model(model, train_loader, validation_data, epochs=300, lr=0.001, patience=20):
    """
    Train `model` as a reconstruction autoencoder with early stopping.

    Uses AdamW + MSE loss, gradient clipping at norm 1.0, and stops when the
    validation loss has not improved for `patience` consecutive epochs. The
    best-validation weights are restored into `model` before returning.

    Args:
        model:            module mapping a batch back onto itself.
        train_loader:     DataLoader yielding (tensor,) batches.
        validation_data:  tensor evaluated once per epoch.
        epochs:           maximum number of epochs.
        lr:               AdamW learning rate.
        patience:         epochs without improvement before stopping.

    Returns:
        (train_losses, validation_losses): per-epoch average losses (floats).
    """
    optimizer = optim.AdamW(model.parameters(), lr=lr, weight_decay=0.01)
    criterion = nn.MSELoss()
    train_losses = []
    validation_losses = []

    best_val_loss = float('inf')
    patience_counter = 0
    best_model_state = None

    for epoch in range(epochs):
        model.train()
        total_loss = 0.0
        for batch in train_loader:
            batch = batch[0]  # TensorDataset yields 1-tuples
            optimizer.zero_grad()
            output = model(batch)
            loss = criterion(output, batch)
            loss.backward()
            # Clip gradients to stabilize Transformer training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            optimizer.step()
            total_loss += loss.item()

        avg_train_loss = total_loss / len(train_loader)
        train_losses.append(avg_train_loss)

        model.eval()
        with torch.no_grad():
            val_output = model(validation_data)
            val_loss = criterion(val_output, validation_data).item()
            validation_losses.append(val_loss)

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            patience_counter = 0
            # BUG FIX: state_dict() returns live references to the model's
            # tensors, so a plain assignment would be silently overwritten by
            # subsequent optimizer steps. Clone each tensor to snapshot the
            # actual best weights.
            best_model_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
        else:
            patience_counter += 1

        if patience_counter >= patience:
            print(f"Early stopping at epoch {epoch}")
            break

        if epoch % 10 == 0:
            print(f'Epoch {epoch}/{epochs}, Train Loss: {avg_train_loss:.6f}, Val Loss: {val_loss:.6f}')

    # Restore the best weights whether or not early stopping triggered
    # (previously they were only restored on the early-stopping path).
    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    return train_losses, validation_losses


# ==========================
# 7. 故障检测评估
# ==========================
def calculate_threshold(model, validation_data, percentile=95):
    """
    Derive a detection threshold from validation reconstruction errors.

    A Gaussian KDE is fitted to the per-sample mean-squared reconstruction
    errors; the threshold is the error value where the (numerically
    integrated) KDE CDF reaches `percentile` percent.

    Returns:
        (threshold, kde): the scalar threshold and the fitted gaussian_kde.
    """
    with torch.no_grad():
        recon = model(validation_data)
        sample_errors = torch.mean((recon - validation_data) ** 2, dim=1).numpy()

    # Kernel density estimate of the error distribution.
    kde = gaussian_kde(sample_errors)
    grid = np.linspace(sample_errors.min(), sample_errors.max(), 1000)
    density = kde(grid)

    # Approximate CDF via cumulative sum, then locate the grid point whose
    # CDF value is closest to the requested percentile.
    cdf = np.cumsum(density) / np.sum(density)
    nearest = np.argmin(np.abs(cdf - percentile / 100))
    return grid[nearest], kde


def calculate_metrics(model, normal_test_data, fault_test_data, threshold):
    """
    Evaluate detection quality: FAR on normal data, FDR per fault class.

    FAR = percentage of normal samples whose reconstruction error exceeds
    `threshold`; FDR = percentage of faulty samples exceeding it, computed
    independently for each fault data set.

    Returns:
        (FAR, FDRs, error_distributions, normal_errors_np, kdes, normal_kde)
        where kdes/normal_kde are gaussian_kde fits of the error samples.
    """
    def _recon_errors(data):
        # Per-sample mean-squared reconstruction error.
        reconstructed = model(data)
        return torch.mean((reconstructed - data) ** 2, dim=1)

    with torch.no_grad():
        # False alarm rate on the normal test set.
        normal_errors = _recon_errors(normal_test_data)
        FAR = (normal_errors > threshold).sum().item() / len(normal_test_data) * 100

        # Detection rate, error samples, and KDE for each fault class.
        FDRs = []
        error_distributions = []
        kdes = []
        for fault_data in fault_test_data:
            fault_errors = _recon_errors(fault_data)
            FDRs.append((fault_errors > threshold).sum().item() / len(fault_data) * 100)
            fault_errors_np = fault_errors.numpy()
            error_distributions.append(fault_errors_np)
            kdes.append(gaussian_kde(fault_errors_np))

    normal_errors_np = normal_errors.numpy()
    normal_kde = gaussian_kde(normal_errors_np)

    return FAR, FDRs, error_distributions, normal_errors_np, kdes, normal_kde


# ==========================
# 8. 主函数
# ==========================
def main():
    """End-to-end driver: load TE data, train the model, pick a detection threshold, and plot the evaluation."""
    # Load standardized training / normal-test / fault-test data.
    train_data, normal_test_data, fault_test_data = load_te_data()

    # Hold out the first 20% of the normal data as a validation split.
    val_size = int(0.2 * len(train_data))
    train_data, val_data = train_data[val_size:], train_data[:val_size]

    # Mini-batch loader over the training split.
    batch_size = 32
    train_loader = DataLoader(TensorDataset(train_data), batch_size=batch_size, shuffle=True)

    # Build and train the model.
    input_dim = train_data.shape[1]
    model = RAE_GNN_Transformer(input_dim=input_dim, dropout=0.1)
    print("开始训练RAE-GNN-Transformer模型...")
    train_losses, val_losses = train_model(model, train_loader, val_data, epochs=300)

    # Detection threshold from the validation reconstruction errors.
    # NOTE(review): val_data drives both early stopping and the threshold;
    # an independent split would give a less biased threshold estimate.
    threshold, val_kde = calculate_threshold(model, val_data)
    print(f"\n计算得到的阈值：{threshold:.6f}")

    # Fault-detection evaluation on the test sets.
    print("\n开始故障检测评估...")
    FAR, FDRs, fault_errors_list, normal_errors, fault_kdes, normal_kde = calculate_metrics(
        model, normal_test_data, fault_test_data, threshold)

    print(f"\n整体误报率(FAR) = {FAR:.2f}%")
    print("\n各故障类型的检出率(FDR)：")
    for i, FDR in enumerate(FDRs, 1):
        print(f"故障 {i:2d}: FDR = {FDR:.2f}%")

    # Figure 1: training curves and error distributions, side by side.
    plt.figure(figsize=(15, 5))

    # Left panel: training / validation loss curves.
    plt.subplot(1, 2, 1)
    plt.plot(train_losses, label="Training Loss")
    plt.plot(val_losses, label="Validation Loss")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.title("Training and Validation Loss")
    plt.legend()

    # Right panel: KDE of reconstruction errors, shared x-axis grid
    # spanning from 0 up to the largest observed error.
    plt.subplot(1, 2, 2)
    x_range = np.linspace(0, max(normal_errors.max(),
                                 max(max(err) for err in fault_errors_list)), 1000)

    # KDE of the normal-data errors.
    normal_density = normal_kde(x_range)
    plt.plot(x_range, normal_density, label='Normal', alpha=0.7)

    # KDEs of the first 5 fault classes (all 21 would clutter the plot).
    for i, kde in enumerate(fault_kdes[:5]):
        fault_density = kde(x_range)
        plt.plot(x_range, fault_density, label=f'Fault {i + 1}', alpha=0.7)

    plt.axvline(x=threshold, color='r', linestyle='--', label='Threshold')
    plt.xlabel("Reconstruction Error")
    plt.ylabel("Density")
    plt.title("Error Distribution (KDE)")
    plt.legend()

    plt.tight_layout()
    plt.show()

    # Figure 2: FDR bar chart over all 21 fault classes.
    plt.figure(figsize=(10, 5))
    plt.bar(range(1, 22), FDRs)
    plt.xlabel("Fault Types")
    plt.ylabel("FDR (%)")
    plt.title(f"Fault Detection Rate for Each Fault Type (FAR = {FAR:.2f}%)")
    plt.tight_layout()
    plt.show()


# Script entry point: run the full training + evaluation pipeline.
if __name__ == "__main__":
    main()