# -*- coding: utf-8 -*-
""" 
Conditional DDPM with Feature Anchoring for Fault Diagnosis
Version: 1.2
Author: Dr. X
Last modified: 2023-11-20
"""

import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from einops import rearrange, repeat
import pytorch_lightning as pl

# --------------------------
# Core components
# --------------------------

class IndustrialTimeEmbedding(nn.Module):
    """Sinusoidal time-step embedding oriented to vibration-signal diffusion.

    Maps a batch of diffusion time steps ``t`` of shape [B] to an embedding
    of shape [B, dim] built from sin/cos pairs at geometrically spaced
    frequencies (transformer-style positional encoding).

    Args:
        dim: output embedding size (expected to be even).
    """
    def __init__(self, dim):
        super().__init__()
        self.dim = dim
        half_dim = dim // 2
        # BUG FIX: the original used torch.arange(0, half_dim, 2), which
        # produces only half_dim // 2 frequencies, so the concatenated
        # sin/cos output had size dim // 2 instead of the promised dim.
        # Using every index in [0, half_dim) gives exactly `dim` features.
        freqs = 1.0 / (10000 ** (torch.arange(half_dim) / float(half_dim)))
        # Non-persistent buffer: follows .to(device)/.cuda() with the module
        # while keeping the state_dict identical to the original class.
        self.register_buffer('freqs', freqs, persistent=False)

    def forward(self, t):
        # t: [B] -> embedding: [B, dim]
        freqs = self.freqs.to(t.device)
        args = t[:, None] * freqs[None, :]
        return torch.cat([torch.sin(args), torch.cos(args)], dim=-1)

class ConditionInjectBlock(nn.Module):
    """Multi-scale feature injection via FiLM-style conditioning.

    Normalizes the feature map with GroupNorm, then modulates it per channel
    with a scale and shift predicted from the condition vector.
    """
    def __init__(self, cond_dim, channels):
        super().__init__()
        # Projects the condition vector to per-channel (scale, shift) pairs.
        self.cond_proj = nn.Sequential(
            nn.Linear(cond_dim, 2 * channels),
            nn.GELU(),
            nn.Linear(2 * channels, 2 * channels),
        )
        self.adaptive_norm = nn.GroupNorm(4, channels)

    def forward(self, x, cond):
        # x: [B, C, L] feature map; cond: [B, D] condition vector.
        gamma, beta = self.cond_proj(cond).chunk(2, dim=1)  # each [B, C]
        # Broadcast the modulation over the temporal axis.
        normed = self.adaptive_norm(x)
        return normed * (1 + gamma[..., None]) + beta[..., None]

class IndustrialUNet(nn.Module):
    """Improved 1-D UNet for vibration-signal denoising.

    Two encoder stages (each /4 via MaxPool1d) and two decoder stages
    (each x4 via ConvTranspose1d), with condition injection at every stage
    and multi-head self-attention at the bottleneck. Skip connections
    concatenate encoder features (and the raw input) into the decoder, so
    the input length must be divisible by 16 to round-trip exactly;
    callers in this file use length 1024 — TODO confirm.

    NOTE(review): forward() accepts a time embedding `t` but never uses it —
    the network is conditioned only on `cond`. Confirm whether time
    conditioning was intended to be injected somewhere.
    """
    def __init__(self, in_channels=1, cond_dim=256):
        super().__init__()
        
        # Down-sampling path.
        # NOTE(review): these nn.Sequential containers are never *called* as
        # sequentials — forward() indexes into them (down1[0], down1[1], ...)
        # because ConditionInjectBlock.forward takes two arguments. The
        # Sequential is effectively used as a parameter container.
        self.down1 = nn.Sequential(
            nn.Conv1d(in_channels, 64, 15, padding=7),
            ConditionInjectBlock(cond_dim, 64),
            nn.GELU(),
            nn.MaxPool1d(4)
        )
        
        self.down2 = nn.Sequential(
            nn.Conv1d(64, 192, 9, padding=4),  # widened to 192 channels
            ConditionInjectBlock(cond_dim, 192),
            nn.GELU(),
            nn.MaxPool1d(4)
        )
        
        # Bottleneck: conv then self-attention over the sequence dimension.
        self.mid_conv = nn.Conv1d(192, 256, 5, padding=2)  # 192 -> 256
        # Default batch_first=False, hence the rearranges in forward().
        self.mid_attn = nn.MultiheadAttention(256, 4)
        
        # Up-sampling path.
        self.up1 = nn.Sequential(
            nn.ConvTranspose1d(256, 192, 4, stride=4),  # 256 -> 192
            ConditionInjectBlock(cond_dim, 192),
            nn.GELU()
        )
        
        self.up2 = nn.Sequential(
            nn.ConvTranspose1d(192+64, 64, 4, stride=4),  # skip connection: (192+64) -> 64
            ConditionInjectBlock(cond_dim, 64),
            nn.GELU()
        )
        
        self.final_conv = nn.Conv1d(64+1, in_channels, 3, padding=1)  # raw-input skip: (64+1) -> 1

        # Intermediate activations recorded per forward pass; consumed by
        # ConditionedDDPM.compute_feature_matching_loss.
        self.feature_points = {}
    
    def get_feature_point(self, name):
        # Return a recorded activation ('down1' / 'down2' / 'mid') or None.
        return self.feature_points.get(name, None)
    
    def forward(self, x, t, cond):
        # x: [B, 1, L] noisy signal; t: time embedding (unused — see class
        # docstring); cond: [B, cond_dim] condition vector.
        self.feature_points = {}  # reset recorded features for this pass
        
        # NOTE(review): this unsqueeze is immediately undone by every
        # .squeeze(-1) below — `cond` could be passed through unchanged.
        cond = cond.unsqueeze(-1)
        
        # Encoder stage 1: conv -> condition injection -> GELU -> /4 pool.
        d1 = self.down1[0](x)
        d1 = self.down1[1](d1, cond.squeeze(-1))
        d1 = self.down1[2](d1)
        d1 = self.down1[3](d1)
        self.feature_points['down1'] = d1  # record for feature matching
        
        # Encoder stage 2.
        d2 = self.down2[0](d1)
        d2 = self.down2[1](d2, cond.squeeze(-1))
        d2 = self.down2[2](d2)
        d2 = self.down2[3](d2)
        self.feature_points['down2'] = d2  # record for feature matching
        
        # Bottleneck: attention expects [seq, batch, channels].
        m = self.mid_conv(d2)
        m = rearrange(m, 'b c l -> l b c')
        m, _ = self.mid_attn(m, m, m)
        m = rearrange(m, 'l b c -> b c l')
        self.feature_points['mid'] = m  # record for feature matching
        
        # Decoder stage 1, then concat encoder skip d1 on the channel axis.
        u1 = self.up1[0](m)
        u1 = self.up1[1](u1, cond.squeeze(-1))
        u1 = self.up1[2](u1)
        u1 = torch.cat([u1, d1], dim=1)
        
        # Decoder stage 2, then concat the raw input as a final skip.
        u2 = self.up2[0](u1)
        u2 = self.up2[1](u2, cond.squeeze(-1))
        u2 = self.up2[2](u2)
        u2 = torch.cat([u2, x], dim=1)
        
        return self.final_conv(u2)

# --------------------------
# DDPM algorithm main body
# --------------------------

class ConditionedDDPM(nn.Module):
    """Conditional DDPM for industrial fault diagnosis.

    Trains an IndustrialUNet to predict the noise added to vibration
    signals, conditioned on the class label and (optionally) on features
    extracted from the real sample by an external `feature_extractor`
    ("feature anchoring"). An auxiliary feature-matching loss pulls
    intermediate UNet activations toward the condition vector.
    """
    def __init__(self, 
                 feature_extractor=None,  # optional: external feature network
                 num_classes=5,
                 cond_dim=256,
                 timesteps=1000,
                 beta_schedule='industrial',
                 use_features=True,
                 feature_matching_weight=0.1):  # weight of the auxiliary loss
        super().__init__()
        
        self.use_features = use_features
        
        # Feature-conditioning components (only built when use_features=True).
        if use_features:
            if feature_extractor is None:
                raise ValueError("When use_features is True, feature_extractor must be provided")
            
            # NOTE(review): the original intent was to hold the extractor as
            # an external reference, but assigning an nn.Module attribute
            # registers it as a submodule, so its parameters DO appear in
            # self.parameters() / state_dict — confirm whether it should be
            # frozen or excluded from the optimizer.
            self.feature_extractor = feature_extractor
            # Probe the extractor once to discover the flattened feature size.
            # Assumes inputs of shape [B, 1, 1024] and that the extractor
            # exposes get_condition_features() — TODO confirm.
            with torch.no_grad():
                device = next(feature_extractor.parameters()).device
                dummy_input = torch.randn(1, 1, 1024, device=device)
                features = feature_extractor.get_condition_features(dummy_input)
                feature_dim = features.shape[1] * features.shape[2]
                print(f"特征维度计算 - 特征维度: {feature_dim}")
            
            self.feature_compressor = nn.Linear(feature_dim, cond_dim)
            # Fuse compressed features with the label embedding.
            self.cond_fusion = nn.Sequential(
                nn.Linear(cond_dim * 2, cond_dim),
                nn.GELU(),
                nn.Linear(cond_dim, cond_dim)
            )
            
            # Projections from UNet activations into the condition space,
            # used by compute_feature_matching_loss.
            self.feature_matching_projections = nn.ModuleDict({
                'down1': nn.Linear(64, cond_dim),    # channel counts mirror IndustrialUNet stages
                'down2': nn.Linear(192, cond_dim),
                'mid': nn.Linear(256, cond_dim)
            })
        
        # Label encoder.
        self.label_embedding = nn.Embedding(num_classes, cond_dim)
        
        # Noise schedule.
        if beta_schedule == 'industrial':
            betas = self._industrial_beta_schedule(timesteps)
        else:
            betas = torch.linspace(1e-4, 0.02, timesteps)
            
        # Registered as buffers so they follow the module across devices.
        self.register_buffer('betas', betas)
        self.register_buffer('alphas', 1. - self.betas)
        self.register_buffer('alphas_cumprod', torch.cumprod(self.alphas, dim=0))
        self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(self.alphas_cumprod))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - self.alphas_cumprod))
        
        # NOTE(review): the time embedding is computed in forward()/sample()
        # and handed to the UNet, but IndustrialUNet never uses it.
        self.time_embed = IndustrialTimeEmbedding(128)
        self.unet = IndustrialUNet(cond_dim=cond_dim)
        self.feature_matching_weight = feature_matching_weight
    
    def _industrial_beta_schedule(self, T):
        """Two-phase linear beta schedule for vibration-signal diffusion.

        A slow phase (1e-6 -> 1e-3) for the first half of the steps, then a
        faster phase (1e-3 -> 2e-2). NOTE(review): for odd T this returns
        only T-1 betas because both halves use T//2 points.
        """
        phase1 = torch.linspace(1e-6, 1e-3, T//2)
        phase2 = torch.linspace(1e-3, 2e-2, T//2)
        return torch.cat([phase1, phase2])
    
    def _extract_condition(self, x_real, labels):
        """Build the condition vector from a real sample and its label."""
        # Label embedding: [B, cond_dim].
        label_emb = self.label_embedding(labels)
        
        if self.use_features:
            # Anchor on the real sample's features; no grad flows into the
            # extractor itself (but does flow through the compressor/fusion).
            with torch.no_grad():
                features = self.feature_extractor.get_condition_features(x_real)
                features = features.reshape(features.shape[0], -1)
            
            compressed_features = self.feature_compressor(features)
            combined_cond = torch.cat([compressed_features, label_emb], dim=-1)
            return self.cond_fusion(combined_cond)
        else:
            # Label-only conditioning.
            return label_emb
    
    def compute_feature_matching_loss(self, unet_features, cond):
        """Cosine + covariance matching loss between UNet features and cond.

        Returns the float 0.0 (not a tensor) when use_features is False.
        NOTE(review): the covariance terms divide by (batch_size - 1), so a
        batch of size 1 produces inf/nan; raises ZeroDivisionError if
        unet_features is empty.
        """
        if not self.use_features:
            return 0.0
            
        total_loss = 0.0
        batch_size = cond.shape[0]
        
        for name, feat in unet_features.items():
            # Global-average-pool the feature map to a per-channel vector.
            feat = feat.mean(dim=-1)  # [B, C, L] -> [B, C]
            # Project into the condition space.
            proj_feat = self.feature_matching_projections[name](feat)
            
            # Cosine-similarity loss: 1 - mean cosine over the batch.
            norm_feat = F.normalize(proj_feat, dim=1)
            norm_cond = F.normalize(cond, dim=1)
            cosine_sim = torch.sum(norm_feat * norm_cond, dim=1)
            loss = 1.0 - cosine_sim.mean()
            
            # Covariance-matching loss between feature and condition statistics.
            feat_centered = proj_feat - proj_feat.mean(dim=0, keepdim=True)
            cond_centered = cond - cond.mean(dim=0, keepdim=True)
            feat_cov = (feat_centered.T @ feat_centered) / (batch_size - 1)
            cond_cov = (cond_centered.T @ cond_centered) / (batch_size - 1)
            cov_loss = F.mse_loss(feat_cov, cond_cov)
            
            total_loss += loss + cov_loss
            
        return total_loss / len(unet_features)
    
    def forward(self, x_real, labels):
        """Training forward pass.
        Args:
            x_real: [B, 1, 1024] - real signals
            labels: [B] - corresponding class labels
        Returns:
            (total_loss, mse_loss, feature_matching_loss)
        """
        # Build the fused condition vector.
        cond = self._extract_condition(x_real, labels)
        
        # Forward diffusion: sample a random timestep per item and noise it.
        batch_size = x_real.shape[0]
        t = torch.randint(0, len(self.betas), (batch_size,), device=x_real.device)
        noise = torch.randn_like(x_real)
        
        sqrt_alpha = self.sqrt_alphas_cumprod[t].view(-1, 1, 1)
        sqrt_one_minus_alpha = self.sqrt_one_minus_alphas_cumprod[t].view(-1, 1, 1)
        x_noisy = sqrt_alpha * x_real + sqrt_one_minus_alpha * noise
        
        # Time embedding (currently unused inside the UNet — see __init__).
        t_emb = self.time_embed(t)
        
        # Predict the added noise.
        pred_noise = self.unet(x_noisy, t_emb, cond)
        
        # Main denoising objective (epsilon-prediction MSE).
        mse_loss = F.mse_loss(pred_noise, noise)
        
        # Auxiliary loss over the activations the UNet recorded this pass.
        feature_matching_loss = self.compute_feature_matching_loss(
            self.unet.feature_points, cond
        )
        
        # Weighted combination.
        total_loss = mse_loss + self.feature_matching_weight * feature_matching_loss
        
        return total_loss, mse_loss, feature_matching_loss
    
    @torch.no_grad()
    def sample(self, labels, cond_features=None, num_samples=1):
        """Conditional ancestral sampling (DDPM reverse process).
        Args:
            labels: class labels, shape [B]
            cond_features: optional feature condition; if None and
                use_features=True, random features are drawn instead
            num_samples: samples generated per condition
        Returns:
            (generated signals [B*num_samples, 1, 1024], expanded labels)
        """
        device = labels.device
        
        # Repeat labels to match the requested sample count.
        if num_samples > 1:
            labels = repeat(labels, 'b -> (b n)', n=num_samples)
        
        # Assemble the condition vector.
        if self.use_features:
            if cond_features is None:
                # No anchors provided: draw random features instead.
                feature_dim = self.feature_compressor.in_features
                random_features = torch.randn(len(labels), feature_dim, device=device)
                compressed_features = self.feature_compressor(random_features)
            else:
                # Use the caller-provided features.
                if num_samples > 1:
                    cond_features = repeat(cond_features, 'b d -> (b n) d', n=num_samples)
                compressed_features = self.feature_compressor(cond_features)
            
            # Fuse features with the label embedding.
            label_emb = self.label_embedding(labels)
            combined_cond = torch.cat([compressed_features, label_emb], dim=-1)
            cond = self.cond_fusion(combined_cond)
        else:
            # Label-only conditioning.
            cond = self.label_embedding(labels)
        
        # Reverse diffusion from pure noise; fixed signal length 1024.
        x = torch.randn(len(labels), 1, 1024, device=device)
        
        for t in reversed(range(0, len(self.betas))):
            t_batch = torch.full((len(labels),), t, device=device)
            t_emb = self.time_embed(t_batch)
            
            pred_noise = self.unet(x, t_emb, cond)
            
            alpha_t = self.alphas[t]
            alpha_cumprod_t = self.alphas_cumprod[t]
            beta_t = self.betas[t]
            
            # No noise is added at the final (t == 0) step.
            if t > 0:
                noise = torch.randn_like(x)
            else:
                noise = torch.zeros_like(x)
                
            # DDPM posterior mean step with sigma_t = sqrt(beta_t).
            x = (1 / torch.sqrt(alpha_t)) * (
                x - (beta_t / torch.sqrt(1 - alpha_cumprod_t)) * pred_noise
            ) + torch.sqrt(beta_t) * noise
            
        return x, labels
    

class DDPMTrainer(pl.LightningModule):
    """PyTorch Lightning training wrapper around a ConditionedDDPM model."""

    def __init__(self, ddpm_model, lr=2e-4):
        super().__init__()
        self.ddpm = ddpm_model
        self.lr = lr

    def training_step(self, batch, batch_idx):
        # A batch is a (signals, labels) pair.
        x_real, labels = batch

        # The DDPM returns (total, mse, feature-matching) losses.
        total_loss, mse_loss, feature_matching_loss = self.ddpm(x_real, labels)

        # Log every loss component to the progress bar.
        for tag, value in (
            ('loss/train_loss', total_loss),
            ('loss/mse_loss', mse_loss),
            ('loss/feature_matching_loss', feature_matching_loss),
        ):
            self.log(tag, value, prog_bar=True)
        return total_loss

    def configure_optimizers(self):
        # Plain Adam over all registered parameters.
        return torch.optim.Adam(self.parameters(), lr=self.lr)


if __name__ == "__main__":
    pass

