import torch
import torch.nn as nn
import torch.nn.functional as F


# Deep decoupling network: splits features into three subspaces
# (fault class, operating condition, fault diameter).
class TripleDecouplingNetwork(nn.Module):
    """Triple-subspace decoupling network for fault-signal images.

    A shared CNN encoder feeds three parallel branches that project into
    separate feature subspaces: fault class, operating condition, and
    fault diameter. The concatenated subspace features are decoded back
    to the input image so the subspaces jointly retain enough information
    to reconstruct it, and a small linear classifier per subspace supplies
    the supervision signal during training.

    Args:
        feature_dim: dimensionality of each of the three feature subspaces.
    """

    def __init__(self, feature_dim=128):
        super().__init__()
        self.feature_dim = feature_dim

        # Shared feature extractor: [B, 3, 32, 32] -> [B, 256, 8, 8].
        self.shared_extractor = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),  # 32x32 -> 16x16
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2)  # 16x16 -> 8x8
        )

        # The three subspace projection branches share one architecture.
        self.fault_class_branch = self._make_branch(feature_dim)     # fault class
        self.condition_branch = self._make_branch(feature_dim)       # operating condition
        self.fault_diameter_branch = self._make_branch(feature_dim)  # fault diameter

        # Decoder: reconstructs the input from the three concatenated
        # subspace features, ensuring they keep enough information.
        self.decoder = nn.Sequential(
            nn.Linear(3 * feature_dim, 256 * 4 * 4),  # merged subspaces
            nn.ReLU(),
            nn.Unflatten(1, (256, 4, 4)),

            nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),  # 4x4 -> 8x8
            nn.ReLU(),

            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),  # 8x8 -> 16x16
            nn.ReLU(),

            nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1),  # 16x16 -> 32x32
            nn.ReLU(),

            nn.Conv2d(32, 3, kernel_size=3, padding=1),
            nn.Sigmoid()  # RGB output in [0, 1]
        )

        # Per-subspace classifiers used for the supervised losses.
        self.fault_class_classifier = nn.Linear(feature_dim, 4)  # normal + 3 fault types
        # FIX: 4 operating conditions (0-3 HP motor load). The head previously
        # had 3 outputs, which made CrossEntropyLoss fail on condition label 3.
        self.condition_classifier = nn.Linear(feature_dim, 4)
        # FIX: 3 fault diameters (0.007, 0.014, 0.021 inch); was wrongly 4.
        self.diameter_classifier = nn.Linear(feature_dim, 3)

    @staticmethod
    def _make_branch(feature_dim):
        """One subspace head: [B, 256, 8, 8] maps -> [B, feature_dim] vector."""
        return nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d((4, 4)),  # 8x8 -> 4x4
            nn.Flatten(),
            nn.Linear(256 * 4 * 4, 512),
            nn.ReLU(),
            nn.Linear(512, feature_dim),
            nn.LayerNorm(feature_dim)
        )

    def forward(self, x):
        """Run the decouple / reconstruct / classify pipeline.

        Args:
            x: input batch of shape [B, 3, 32, 32].

        Returns:
            dict with the three subspace features ([B, feature_dim] each),
            the reconstructed input ([B, 3, 32, 32]), and the three
            classification logits.
        """
        # Extract shared features.
        shared_feat = self.shared_extractor(x)  # [B, 256, 8, 8]

        # Project into the three feature subspaces.
        fault_class_feat = self.fault_class_branch(shared_feat)
        condition_feat = self.condition_branch(shared_feat)
        fault_diameter_feat = self.fault_diameter_branch(shared_feat)

        # Reconstruct the input from the concatenated subspace features.
        combined_feat = torch.cat([fault_class_feat, condition_feat, fault_diameter_feat], dim=1)
        reconstructed_x = self.decoder(combined_feat)

        # Classification logits (consumed by the supervised losses).
        fault_class_pred = self.fault_class_classifier(fault_class_feat)
        condition_pred = self.condition_classifier(condition_feat)
        diameter_pred = self.diameter_classifier(fault_diameter_feat)

        return {
            'fault_class_feat': fault_class_feat,
            'condition_feat': condition_feat,
            'fault_diameter_feat': fault_diameter_feat,
            'reconstructed_x': reconstructed_x,
            'fault_class_pred': fault_class_pred,
            'condition_pred': condition_pred,
            'diameter_pred': diameter_pred
        }


# Combined loss for the three decoupled feature subspaces.
class TripleDecouplingLoss(nn.Module):
    """Total training loss for :class:`TripleDecouplingNetwork`.

    total = lambda_recon * reconstruction MSE
          + lambda_corr * pairwise cross-covariance penalty between subspaces
          + lambda_supervised * sum of the three classification CE losses

    Args:
        lambda_recon: weight of the reconstruction loss.
        lambda_corr: weight of the cross-correlation (decoupling) loss.
        lambda_supervised: weight of the supervised classification losses.
    """

    def __init__(self, lambda_recon=1.0, lambda_corr=1.0, lambda_supervised=1.0):
        super().__init__()
        self.lambda_recon = lambda_recon  # reconstruction-loss weight
        self.lambda_corr = lambda_corr  # correlation-loss weight
        self.lambda_supervised = lambda_supervised  # supervised-loss weight
        self.mse_loss = nn.MSELoss()
        self.cross_entropy_loss = nn.CrossEntropyLoss()

    def forward(self, x, outputs, condition_labels, fault_class_labels, diameter_labels):
        """Compute the total loss and its individual components.

        Args:
            x: original input batch, shape [B, 3, H, W].
            outputs: dict produced by ``TripleDecouplingNetwork.forward``.
            condition_labels, fault_class_labels, diameter_labels:
                class-index tensors of shape [B] (note the parameter order:
                condition first, then fault class, then diameter).

        Returns:
            ``(total_loss, components)`` where ``components`` maps each loss
            name to its unweighted tensor value.
        """
        # 1. Reconstruction loss: the three subspaces together must retain
        #    enough information to rebuild the input.
        recon_loss = self.mse_loss(outputs['reconstructed_x'], x)

        # 2. Supervised losses: each subspace must predict its own factor.
        fault_class_loss = self.cross_entropy_loss(outputs['fault_class_pred'], fault_class_labels)
        condition_loss = self.cross_entropy_loss(outputs['condition_pred'], condition_labels)
        diameter_loss = self.cross_entropy_loss(outputs['diameter_pred'], diameter_labels)
        supervised_loss = fault_class_loss + condition_loss + diameter_loss

        # 3. Decoupling loss: minimize the correlation between every pair of
        #    feature subspaces.
        feats = [
            outputs['fault_class_feat'],
            outputs['condition_feat'],
            outputs['fault_diameter_feat']
        ]

        corr_loss = 0.0
        # FIX: guard the unbiased-covariance denominator (n - 1) so a batch
        # of size 1 does not divide by zero and produce an inf/NaN loss.
        denom = max(feats[0].size(0) - 1, 1)
        # Penalize each pair of feature subspaces.
        for i in range(len(feats)):
            for j in range(i + 1, len(feats)):
                # Center features over the batch dimension.
                feat_i_centered = feats[i] - torch.mean(feats[i], dim=0, keepdim=True)
                feat_j_centered = feats[j] - torch.mean(feats[j], dim=0, keepdim=True)

                # Cross-covariance matrix; penalize its Frobenius norm.
                cov_matrix = torch.matmul(feat_i_centered.T, feat_j_centered) / denom
                corr_loss += torch.norm(cov_matrix, p='fro')

        # Weighted total.
        total_loss = (self.lambda_recon * recon_loss +
                      self.lambda_corr * corr_loss +
                      self.lambda_supervised * supervised_loss)

        # Return the total plus the unweighted components for logging.
        return total_loss, {
            'reconstruction': recon_loss,
            'correlation': corr_loss,
            'fault_class': fault_class_loss,
            'condition': condition_loss,
            'diameter': diameter_loss
        }


# Smoke-test the network and the loss.
if __name__ == "__main__":
    # Random input batch (batch_size=8, 3 channels, 32x32).
    x = torch.randn(8, 3, 32, 32)

    # Build the network.
    model = TripleDecouplingNetwork(feature_dim=128)

    # Forward pass.
    outputs = model(x)

    # Print output shapes.
    print(f"故障类别特征: {outputs['fault_class_feat'].shape}")
    print(f"工况特征: {outputs['condition_feat'].shape}")
    print(f"故障直径特征: {outputs['fault_diameter_feat'].shape}")
    print(f"重构图像: {outputs['reconstructed_x'].shape}")
    print(f"故障类别预测: {outputs['fault_class_pred'].shape}")
    print(f"工况预测: {outputs['condition_pred'].shape}")
    print(f"故障直径预测: {outputs['diameter_pred'].shape}")

    # Exercise the loss function.
    criterion = TripleDecouplingLoss()
    # Random labels; ranges stay within every classifier head's class count.
    fault_labels = torch.randint(0, 4, (8,))      # normal + 3 fault types
    condition_labels = torch.randint(0, 3, (8,))  # operating conditions
    diameter_labels = torch.randint(0, 3, (8,))   # fault diameters

    # FIX: pass labels by keyword. The previous positional call fed
    # fault_labels into the condition_labels parameter (the loss signature
    # is condition, fault_class, diameter) and shifted the rest.
    loss, metrics = criterion(
        x, outputs,
        condition_labels=condition_labels,
        fault_class_labels=fault_labels,
        diameter_labels=diameter_labels,
    )
    print(f"总损失: {loss.item()}")
    print("各部分损失:", metrics)
