import math
import numpy as np
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import reduce
from utils import *


class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel attention, then spatial."""

    def __init__(self, in_channels, reduction_ratio=16, kernel_size=7):
        super(CBAM, self).__init__()
        self.channel_attention = ChannelAttention(in_channels, reduction_ratio)
        self.spatial_attention = SpatialAttention(kernel_size)

    def forward(self, x):
        # Sequential refinement: reweight channels first, then time steps.
        return self.spatial_attention(self.channel_attention(x))
#
#
class ChannelAttention(nn.Module):
    """Channel attention (CBAM): gates each channel by pooled global statistics."""

    def __init__(self, in_channels, reduction_ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.max_pool = nn.AdaptiveMaxPool1d(1)
        # Shared bottleneck MLP applied to both pooled descriptors.
        self.fc = nn.Sequential(
            nn.Linear(in_channels, in_channels // reduction_ratio, bias=False),
            nn.ReLU(),
            nn.Linear(in_channels // reduction_ratio, in_channels, bias=False),
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        batch, channels, _ = x.size()
        pooled = [self.avg_pool(x), self.max_pool(x)]
        scores = sum(self.fc(p.view(batch, channels)) for p in pooled)
        weights = self.sigmoid(scores).unsqueeze(-1)
        return weights * x
#
#
class SpatialAttention(nn.Module):
    """Spatial attention (CBAM): gates each time step via cross-channel stats."""

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        # "Same" padding so the attention map matches the input length.
        padding = (kernel_size - 1) // 2
        self.conv = nn.Conv1d(2, 1, kernel_size=kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        mean_map = torch.mean(x, dim=1, keepdim=True)
        max_map = torch.max(x, dim=1, keepdim=True).values
        attn = self.sigmoid(self.conv(torch.cat([mean_map, max_map], dim=1)))
        return attn * x
#
#
# class ResidualConvBlock(nn.Module):
#     def __init__(self, inc: int, outc: int, kernel_size: int, stride=1, gn=8):
#         super().__init__()
#         self.same_channels = inc == outc
#         self.conv = nn.Sequential(
#             nn.Conv1d(inc, outc, kernel_size, stride, padding=get_padding(kernel_size)),
#             nn.GroupNorm(gn, outc),
#             nn.PReLU(),
#         )
#
#     def forward(self, x: torch.Tensor) -> torch.Tensor:
#         x1 = self.conv(x)
#         return (x + x1) / 2 if self.same_channels else x1
#
#
class UnetDownWithCBAM(nn.Module):
    """Residual conv -> max-pool downsampling, followed by CBAM attention."""

    def __init__(self, in_channels, out_channels, kernel_size, gn=8, factor=2):
        super(UnetDownWithCBAM, self).__init__()
        self.pool = nn.MaxPool1d(factor)
        self.layer = ResidualConvBlock(in_channels, out_channels, kernel_size, gn=gn)
        self.cbam = CBAM(out_channels)

    def forward(self, x):
        # Conv first, then halve the temporal length, then attend.
        return self.cbam(self.pool(self.layer(x)))
#
#
# class UnetUpWithCBAM(nn.Module):
#     def __init__(self, in_channels, out_channels, kernel_size, gn=8, factor=2):
#         super(UnetUpWithCBAM, self).__init__()
#         self.pool = nn.Upsample(scale_factor=factor, mode="nearest")
#         self.layer = ResidualConvBlock(in_channels, out_channels, kernel_size, gn=gn)
#         self.cbam = CBAM(out_channels)
#
#     def forward(self, x):
#         x = self.pool(x)
#         x = self.layer(x)
#         x = self.cbam(x)
#         return x
#
#
# class ConditionalUNetWithCBAM(nn.Module):
#     def __init__(self, in_channels, n_feat=256):
#         super(ConditionalUNetWithCBAM, self).__init__()
#         self.in_channels = in_channels
#         self.n_feat = n_feat
#
#         self.d1_out = n_feat * 1
#         self.d2_out = n_feat * 2
#         self.d3_out = n_feat * 3
#         self.d4_out = n_feat * 4
#
#         self.u1_out = n_feat
#         self.u2_out = n_feat
#         self.u3_out = n_feat
#         self.u4_out = in_channels
#
#         self.sin_emb = SinusoidalPosEmb(n_feat)
#
#         self.down1 = UnetDownWithCBAM(in_channels, self.d1_out, 1, gn=8, factor=2)
#         self.down2 = UnetDownWithCBAM(self.d1_out, self.d2_out, 1, gn=8, factor=2)
#         self.down3 = UnetDownWithCBAM(self.d2_out, self.d3_out, 1, gn=8, factor=2)
#
#         self.up2 = UnetUpWithCBAM(self.d3_out, self.u2_out, 1, gn=8, factor=2)
#         self.up3 = UnetUpWithCBAM(self.u2_out + self.d2_out, self.u3_out, 1, gn=8, factor=2)
#         self.up4 = UnetUpWithCBAM(self.u3_out + self.d1_out, self.u4_out, 1, gn=8, factor=2)
#         self.out = nn.Conv1d(self.u4_out + in_channels, in_channels, 1)
#
#     def forward(self, x, t):
#         down1 = self.down1(x)
#         down2 = self.down2(down1)
#         down3 = self.down3(down2)
#
#         temb = self.sin_emb(t).view(-1, self.n_feat, 1)
#
#         up1 = self.up2(down3)
#         up2 = self.up3(torch.cat([up1 + temb, down2], 1))
#         up3 = self.up4(torch.cat([up2 + temb, down1], 1))
#         out = self.out(torch.cat([up3, x], 1))
#
#         down = (down1, down2, down3)
#         up = (up1, up2, up3)
#         return out, down, up



# class EncoderWithLSTM(nn.Module):
#     def __init__(self, in_channels, dim=512, hidden_dim=256, num_layers=2):
#         super(EncoderWithLSTM, self).__init__()
#
#         self.in_channels = in_channels
#         self.e1_out = dim
#         self.e2_out = dim
#         self.e3_out = dim
#
#         # 卷积下采样层
#         self.down1 = UnetDown(in_channels, self.e1_out, kernel_size=1, gn=8, factor=2)
#         self.down2 = UnetDown(self.e1_out, self.e2_out, kernel_size=1, gn=8, factor=2)
#         self.down3 = UnetDown(self.e2_out, self.e3_out, kernel_size=1, gn=8, factor=2)
#
#         # LSTM 层
#         self.lstm = nn.LSTM(
#             input_size=self.e3_out, hidden_size=hidden_dim, num_layers=num_layers, batch_first=True
#         )
#
#         self.avg_pooling = nn.AdaptiveAvgPool1d(output_size=1)
#         self.act = nn.Tanh()
#
#     def forward(self, x):
#         # 卷积下采样
#         dn1 = self.down1(x)  # (B, C, T/2)
#         dn2 = self.down2(dn1)  # (B, C, T/4)
#         dn3 = self.down3(dn2)  # (B, C, T/8)
#
#         # 将通道和时间步调整为 LSTM 输入格式
#         lstm_input = dn3.permute(0, 2, 1)  # (B, T, C)
#         lstm_out, _ = self.lstm(lstm_input)  # (B, T, hidden_dim)
#
#         # 取最后一个时间步的输出作为特征表示
#         z = lstm_out[:, -1, :]  # (B, hidden_dim)
#
#         return (dn1, dn2, dn3), z




# class TemporalAttention(nn.Module):
#     def __init__(self, dim, num_heads=4):
#         super(TemporalAttention, self).__init__()
#         self.attention = nn.MultiheadAttention(embed_dim=dim, num_heads=num_heads, batch_first=True)
#         self.norm = nn.LayerNorm(dim)
#
#     def forward(self, x):
#         # x: (B, T, C), where B is batch size, T is time steps, C is channels/features
#         attn_out, _ = self.attention(x, x, x)
#         return self.norm(attn_out + x)  # Residual connection



def get_padding(kernel_size, dilation=1):
    """Return the padding that keeps a stride-1 convolution length-preserving.

    The effective kernel extent is ``dilation * (kernel_size - 1) + 1``;
    half of (extent - 1), floored for even kernels, gives "same" padding.
    Uses integer arithmetic instead of the original float division + int().
    """
    return dilation * (kernel_size - 1) // 2


# Swish activation function
class Swish(nn.Module):
    """Swish / SiLU activation: ``x * sigmoid(x)``."""

    def __init__(self):
        super().__init__()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        gate = self.sigmoid(x)
        return gate * x


class SinusoidalPosEmb(nn.Module):
    """Transformer-style sinusoidal embedding of a scalar position/timestep.

    Maps a (B,) tensor of positions to (B, dim): the first half of the
    output holds sines, the second half cosines, over a geometric ladder
    of frequencies from 1 down to 1/10000.
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        half = self.dim // 2
        scale = math.log(10000) / (half - 1)
        freqs = torch.exp(torch.arange(half, device=x.device) * -scale)
        angles = x[:, None] * freqs[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)


class WeightStandardizedConv1d(nn.Conv1d):
    """
    Conv1d with weight standardization (https://arxiv.org/abs/1903.10520).

    Each output filter's weights are normalized to zero mean and unit
    variance before the convolution; weight standardization purportedly
    works synergistically with group normalization.

    Fix: the per-filter mean/variance are computed with native torch
    reductions instead of einops ``reduce`` + ``functools.partial``,
    removing a third-party call from the forward pass with identical math.
    """

    def forward(self, x):
        # Looser epsilon under reduced precision to keep rsqrt stable.
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3

        weight = self.weight
        # Statistics per output channel, over (in_channels, kernel) dims —
        # equivalent to einops' "o ... -> o 1 1" reduction with keepdim.
        mean = weight.mean(dim=(1, 2), keepdim=True)
        var = weight.var(dim=(1, 2), unbiased=False, keepdim=True)
        normalized_weight = (weight - mean) * (var + eps).rsqrt()

        return F.conv1d(
            x,
            normalized_weight,
            self.bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
        )


class ResidualConvBlock(nn.Module):
    """ResNet-style block: weight-standardized conv -> GroupNorm -> PReLU,
    with an averaged identity skip when input/output channels match."""

    def __init__(self, inc: int, outc: int, kernel_size: int, stride=1, gn=8):
        super().__init__()
        self.same_channels = inc == outc
        self.ks = kernel_size
        self.conv = nn.Sequential(
            WeightStandardizedConv1d(inc, outc, self.ks, stride, get_padding(self.ks)),
            nn.GroupNorm(gn, outc),
            nn.PReLU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        y = self.conv(x)
        if not self.same_channels:
            return y
        # Averaging (rather than summing) keeps the activation scale stable.
        return (x + y) / 2


class UnetDown(nn.Module):
    """Residual conv block followed by max-pool downsampling (length / factor)."""

    def __init__(self, in_channels, out_channels, kernel_size, gn=8, factor=2):
        super(UnetDown, self).__init__()
        self.pool = nn.MaxPool1d(factor)
        self.layer = ResidualConvBlock(in_channels, out_channels, kernel_size, gn=gn)

    def forward(self, x):
        return self.pool(self.layer(x))


class UnetUp(nn.Module):
    """Nearest-neighbour upsampling (length * factor) then a residual conv block."""

    def __init__(self, in_channels, out_channels, kernel_size, gn=8, factor=2):
        super(UnetUp, self).__init__()
        self.pool = nn.Upsample(scale_factor=factor, mode="nearest")
        self.layer = ResidualConvBlock(in_channels, out_channels, kernel_size, gn=gn)

    def forward(self, x):
        return self.layer(self.pool(x))


class EmbedFC(nn.Module):
    """Generic two-layer MLP embedding.

    The input is flattened to (B, input_dim) before the projection, so any
    trailing shape whose elements multiply to input_dim is accepted.
    """

    def __init__(self, input_dim, emb_dim):
        super(EmbedFC, self).__init__()
        self.input_dim = input_dim
        self.model = nn.Sequential(
            nn.Linear(input_dim, emb_dim),
            nn.PReLU(),
            nn.Linear(emb_dim, emb_dim),
        )

    def forward(self, x):
        flat = x.view(-1, self.input_dim)
        return self.model(flat)


class ConditionalUNet(nn.Module):
    """1-D U-Net denoiser conditioned on the diffusion timestep.

    Three pooling stages halve the temporal length each time; three
    upsampling stages restore it, with encoder skip connections and a
    sinusoidal time embedding added before the last two decoder stages.
    Returns the output plus the intermediate down/up feature tuples.
    """

    def __init__(self, in_channels, n_feat=256):
        super(ConditionalUNet, self).__init__()

        self.in_channels = in_channels
        self.n_feat = n_feat

        # Encoder widths (d4_out kept for compatibility; unused below).
        self.d1_out = n_feat * 1
        self.d2_out = n_feat * 2
        self.d3_out = n_feat * 3
        self.d4_out = n_feat * 4

        # Decoder widths (u1_out kept for compatibility; unused below).
        self.u1_out = n_feat
        self.u2_out = n_feat
        self.u3_out = n_feat
        self.u4_out = in_channels

        self.sin_emb = SinusoidalPosEmb(n_feat)

        self.down1 = UnetDown(in_channels, self.d1_out, 1, gn=8, factor=2)
        self.down2 = UnetDown(self.d1_out, self.d2_out, 1, gn=8, factor=2)
        self.down3 = UnetDown(self.d2_out, self.d3_out, 1, gn=8, factor=2)

        self.up2 = UnetUp(self.d3_out, self.u2_out, 1, gn=8, factor=2)
        self.up3 = UnetUp(self.u2_out + self.d2_out, self.u3_out, 1, gn=8, factor=2)
        self.up4 = UnetUp(self.u3_out + self.d1_out, self.u4_out, 1, gn=8, factor=2)
        self.out = nn.Conv1d(self.u4_out + in_channels, in_channels, 1)

    def forward(self, x, t):
        # Encoder: each stage halves the temporal length (e.g. 2000 -> 250).
        dn1 = self.down1(x)
        dn2 = self.down2(dn1)
        dn3 = self.down3(dn2)

        # Broadcastable time embedding of shape (B, n_feat, 1).
        temb = self.sin_emb(t).view(-1, self.n_feat, 1)

        # Decoder with skip connections; the time embedding is injected
        # before the second and third upsampling stages.
        u1 = self.up2(dn3)
        u2 = self.up3(torch.cat([u1 + temb, dn2], 1))
        u3 = self.up4(torch.cat([u2 + temb, dn1], 1))
        out = self.out(torch.cat([u3, x], 1))

        return out, (dn1, dn2, dn3), (u1, u2, u3)

class TCNBlock(nn.Module):
    """Dilated residual conv block: PReLU(GroupNorm(conv(x) + x))."""

    def __init__(self, in_channels, dilation=1):
        super().__init__()
        # padding == dilation keeps the length unchanged for kernel_size=3.
        self.conv = nn.Conv1d(in_channels, in_channels, kernel_size=3, padding=dilation, dilation=dilation)
        self.norm = nn.GroupNorm(8, in_channels)
        self.act = nn.PReLU()

    def forward(self, x):
        residual = self.conv(x) + x
        return self.act(self.norm(residual))


class Encoder(nn.Module):
    """Downsampling encoder: CBAM conv stages, a small dilated TCN, and a
    global average pool producing a fixed-size latent vector."""

    def __init__(self, in_channels, dim=512):
        super().__init__()
        self.down1 = UnetDownWithCBAM(in_channels, dim, kernel_size=1, gn=8, factor=2)
        self.down2 = UnetDownWithCBAM(dim, dim, kernel_size=1, gn=8, factor=2)
        self.down3 = UnetDownWithCBAM(dim, dim, kernel_size=1, gn=8, factor=2)

        # Dilated temporal convolutions over the deepest features.
        self.tcn = nn.Sequential(
            TCNBlock(dim, dilation=1),
            TCNBlock(dim, dilation=2),
        )

        self.avg_pooling = nn.AdaptiveAvgPool1d(1)
        self.e3_out = dim

    def forward(self, x0):
        # Each stage halves the temporal length: T -> T/2 -> T/4 -> T/8.
        dn1 = self.down1(x0)
        dn2 = self.down2(dn1)
        dn3 = self.tcn(self.down3(dn2))

        # Global average pool to a (B, dim) latent.
        z = self.avg_pooling(dn3).view(-1, self.e3_out)
        return ((dn1, dn2, dn3), z)


import torch.nn as nn
from torchstat import stat  # 模型统计工具

# Depthwise-separable convolution: drop-in replacement for a plain Conv1d.
class DepthwiseSeparableConv(nn.Module):
    """Depthwise (per-channel) conv followed by a 1x1 pointwise conv."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super().__init__()
        # groups=in_channels makes the first conv purely per-channel.
        self.depthwise = nn.Conv1d(
            in_channels, in_channels, kernel_size,
            stride=stride, padding=padding, groups=in_channels
        )
        self.pointwise = nn.Conv1d(in_channels, out_channels, 1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))

# Lightweight encoder variant (reduced channel count).
class LiteEncoder(nn.Module):
    """Encoder built from depthwise-separable convs + max pooling, ending in
    a global average pool and a small linear projection."""

    def __init__(self, in_channels, dim=256):
        super().__init__()

        def _stage(cin):
            # Separable conv keeps the length; MaxPool1d(2) halves it.
            return nn.Sequential(
                DepthwiseSeparableConv(cin, dim, kernel_size=3, padding=1),
                nn.MaxPool1d(2),
            )

        self.down1 = _stage(in_channels)
        self.down2 = _stage(dim)
        self.down3 = _stage(dim)
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(dim, dim)

    def forward(self, x):
        dn1 = self.down1(x)    # (B, dim, T/2)
        dn2 = self.down2(dn1)  # (B, dim, T/4)
        dn3 = self.down3(dn2)  # (B, dim, T/8)
        z = self.fc(self.avg_pool(dn3).squeeze(-1))  # (B, dim)
        return (dn1, dn2, dn3), z

# Lightweight classification head.
class LiteClassifier(nn.Module):
    """Compact two-layer classifier with dropout; squeezes dim 1 of the output."""

    def __init__(self, input_dim, hidden_dim=128, num_classes=2):
        super().__init__()
        self.model = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(inplace=True),  # in-place to reduce memory
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, num_classes),
        )

    def forward(self, x):
        logits = self.model(x)
        return logits.squeeze(dim=1)

# class Encoder(nn.Module):
#     def __init__(self, in_channels, dim=512):
#         super(Encoder, self).__init__()
#
#         self.in_channels = in_channels
#         self.e1_out = dim
#         self.e2_out = dim
#         self.e3_out = dim
#
#         self.down1 = UnetDown(in_channels, self.e1_out, 1, gn=8, factor=2)
#         self.down2 = UnetDown(self.e1_out, self.e2_out, 1, gn=8, factor=2)
#         self.down3 = UnetDown(self.e2_out, self.e3_out, 1, gn=8, factor=2)
#
#         self.avg_pooling = nn.AdaptiveAvgPool1d(output_size=1)
#         self.max_pooling = nn.AdaptiveMaxPool1d(output_size=1)
#         self.act = nn.Tanh()
#
#     def forward(self, x0):
#         # Down sampling
#         dn1 = self.down1(x0)  # 2048 -> 1024
#         dn2 = self.down2(dn1)  # 1024 -> 512
#         dn3 = self.down3(dn2)  # 512 -> 256
#         z = self.avg_pooling(dn3).view(-1, self.e3_out)  # [b, features]
#         down = (dn1, dn2, dn3)
#         out = (down, z)
#         return out


class Decoder(nn.Module):
    """Fuses encoder features with detached diffusion-model features and
    upsamples back to the input resolution.

    The diffusion branch is always ``.detach()``-ed, so decoder gradients
    never flow into the DDPM.
    """

    def __init__(self, in_channels, n_feat=256, encoder_dim=512, n_classes=2):
        super(Decoder, self).__init__()

        self.in_channels = in_channels
        self.n_feat = n_feat
        self.n_classes = n_classes
        # Encoder stage widths.
        self.e1_out = encoder_dim
        self.e2_out = encoder_dim
        self.e3_out = encoder_dim
        # Diffusion U-Net stage widths.
        self.d1_out = n_feat
        self.d2_out = n_feat * 2
        self.d3_out = n_feat * 3
        # Decoder stage widths.
        self.u1_out = n_feat
        self.u2_out = n_feat
        self.u3_out = n_feat
        self.u4_out = in_channels

        # U-Net style upsampling over concatenated feature streams.
        self.up1 = UnetUp(self.d3_out + self.e3_out, self.u2_out, 1, gn=8, factor=2)
        self.up2 = UnetUp(self.d2_out + self.u2_out, self.u3_out, 1, gn=8, factor=2)
        self.up3 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode="nearest"),
            nn.Conv1d(
                self.d1_out + self.u3_out + in_channels * 2, in_channels, 1, 1, 0
            ),
        )

        self.pool = nn.AvgPool1d(2)

    def forward(self, x0, encoder_out, diffusion_out):
        # Encoder output: multi-scale features plus latent z (unused here).
        (dn1, dn2, dn3), z = encoder_out

        # Diffusion output: reconstruction, down/up features, timestep.
        x_hat, down_ddpm, up, t = diffusion_out
        dn11, dn22, dn33 = down_ddpm

        up1 = self.up1(torch.cat([dn3, dn33.detach()], 1))
        up2 = self.up2(torch.cat([up1, dn22.detach()], 1))
        fused = torch.cat(
            [self.pool(x0), self.pool(x_hat.detach()), up2, dn11.detach()], 1
        )
        return self.up3(fused)


class DiffEWithDDPMFusion(nn.Module):
    """Diff-E variant that fuses the encoder latent with projected DDPM
    features before classification.

    NOTE(review): this mutates ``fc.linear_out[0]``, so ``fc`` must be a
    LinearClassifier-style module, and the encoder latent is assumed to be
    512-dimensional — confirm both against the construction site.
    """

    def __init__(self, encoder, decoder, fc, n_feat=128):
        super(DiffEWithDDPMFusion, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.fc = fc

        # Channel count of the deepest DDPM feature map, derived from n_feat.
        ddpm_in_channels = n_feat * 3  # 128*3=384

        # DDPM feature projection (written for ~83872-step sequences).
        self.ddpm_feat_proj = nn.Sequential(
            nn.AdaptiveAvgPool1d(64),  # compress the long time axis to 64 points
            nn.Conv1d(ddpm_in_channels, 512, kernel_size=3, padding=1),  # [B,512,64]
            nn.PReLU(),
            nn.Flatten(),
            nn.Linear(512 * 64, 512)  # align with the encoder latent dimension
        )

        # Classifier input grows to 512 (encoder) + 512 (DDPM) = 1024.
        self.fc.linear_out[0] = nn.Linear(512 + 512, self.fc.linear_out[0].out_features)

    def forward(self, x0, ddpm_out):
        encoder_out = self.encoder(x0)  # ((dn1, dn2, dn3), z)
        # Deepest DDPM feature map. NOTE(review): the original comment said
        # [B,768,10484], but ddpm_in_channels above is n_feat*3=384 — confirm.
        ddpm_features = ddpm_out[1][-1]

        # Project the DDPM features to a fixed-size vector.
        proj_ddpm = self.ddpm_feat_proj(ddpm_features)  # [B,512]

        # Concatenate encoder latent with the projected DDPM features.
        fused_feat = torch.cat([encoder_out[1], proj_ddpm], dim=1)  # [B,1024]

        decoder_out = self.decoder(x0, encoder_out, ddpm_out)
        fc_out = self.fc(fused_feat)

        return decoder_out, fc_out




class DiffE(nn.Module):
    """Diff-E wrapper: the encoder latent feeds the classifier, while the
    decoder fuses the input, encoder output, and diffusion output."""

    def __init__(self, encoder, decoder, fc):
        super(DiffE, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.fc = fc

    def forward(self, x0, ddpm_out):
        encoder_out = self.encoder(x0)
        decoder_out = self.decoder(x0, encoder_out, ddpm_out)
        # The classifier consumes the latent z (second element of encoder_out).
        fc_out = self.fc(encoder_out[1])
        return decoder_out, fc_out


class DecoderNoDiff(nn.Module):
    """Decoder that reconstructs the input from encoder features and the
    DDPM reconstruction ``x_hat`` without diffusion-step conditioning.

    Fix: the original forward computed four time embeddings and an L1 loss
    and discarded them all — pure wasted work on every call. They are
    removed here. The embedding submodules are kept in ``__init__`` so
    existing checkpoints/state dicts still load, and ``t`` stays in the
    signature for interface compatibility.
    """

    def __init__(self, in_channels, n_feat=256, encoder_dim=512, n_classes=2):
        super(DecoderNoDiff, self).__init__()

        self.in_channels = in_channels
        self.n_feat = n_feat
        self.n_classes = n_classes
        self.e1_out = encoder_dim
        self.e2_out = encoder_dim
        self.e3_out = encoder_dim
        self.u1_out = n_feat
        self.u2_out = n_feat
        self.u3_out = n_feat
        self.u4_out = n_feat

        # Unused in forward; retained only for checkpoint compatibility.
        self.sin_emb = SinusoidalPosEmb(n_feat)
        self.timeembed1 = EmbedFC(n_feat, self.e3_out)
        self.timeembed2 = EmbedFC(n_feat, self.u2_out)
        self.timeembed3 = EmbedFC(n_feat, self.u3_out)
        self.contextembed1 = EmbedFC(self.e3_out, self.e3_out)
        self.contextembed2 = EmbedFC(self.e3_out, self.u2_out)
        self.contextembed3 = EmbedFC(self.e3_out, self.u3_out)

        # U-Net style upsampling.
        self.up2 = UnetUp(self.e3_out, self.u2_out, 1, gn=8, factor=2)
        self.up3 = UnetUp(self.e2_out + self.u2_out, self.u3_out, 1, gn=8, factor=2)
        self.up4 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode="nearest"),
            nn.Conv1d(self.u3_out + self.e1_out + in_channels, in_channels, 1, 1, 0),
        )

        # Unused in forward; retained only for checkpoint compatibility.
        self.out = nn.Conv1d(self.u4_out, in_channels, 1)
        self.pool = nn.AvgPool1d(2)

    def forward(self, x0, x_hat, encoder_out, t):
        """Upsample encoder features and fuse them with pooled x0 / x_hat.

        ``t`` is accepted for interface compatibility but no longer used.
        """
        down, z = encoder_out
        dn1, dn2, dn3 = down

        up2 = self.up2(dn3)                        # T/8 -> T/4
        up3 = self.up3(torch.cat([up2, dn2], 1))   # T/4 -> T/2
        out = self.up4(
            torch.cat([self.pool(x0), self.pool(x_hat), up3, dn1], 1)
        )                                          # T/2 -> T
        return out

# class DecoderNoDiff(nn.Module):
#     def __init__(self, in_channels, n_feat=256, encoder_dim=512):
#         super(DecoderNoDiff, self).__init__()
#         self.in_channels = in_channels
#         self.encoder_dim = encoder_dim
#
#         # 上采样层定义（无需时间嵌入）
#         self.up1 = UnetUp(encoder_dim, n_feat, kernel_size=1, gn=8, factor=2)
#         self.up2 = UnetUp(n_feat, n_feat, kernel_size=1, gn=8, factor=2)
#         self.up3 = UnetUp(n_feat, in_channels, kernel_size=1, gn=8, factor=2)
#         self.out = nn.Conv1d(in_channels, in_channels, 1)
#
#     def forward(self, encoder_out):
#         # encoder_out 是 Encoder 的输出特征（例如下采样的多尺度特征）
#         # 假设 encoder_out 是 Encoder 的最后一层输出，形状为 (B, encoder_dim, T/8)
#         x = self.up1(encoder_out)
#         x = self.up2(x)
#         x = self.up3(x)
#         return self.out(x)


class MLPClassifier(nn.Module):
    """Three-layer MLP classifier with dropout; outputs softmax probabilities."""

    def __init__(self, input_dim, hidden_dim, output_dim, dropout=0.5):
        super(MLPClassifier, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim)

        # Dropout between layers to reduce overfitting.
        self.dropout = nn.Dropout(dropout)

        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)  # normalizes over the class dimension

    def forward(self, x):
        hidden = self.dropout(self.relu(self.fc1(x)))
        hidden = self.dropout(self.relu(self.fc2(hidden)))
        return self.softmax(self.fc3(hidden))


class LinearClassifier(nn.Module):
    """Linear + GroupNorm + PReLU head producing a single logit per sample.

    ``emb_dim`` is accepted for interface compatibility but unused.
    """

    def __init__(self, in_dim, latent_dim, emb_dim=1):
        super().__init__()
        layers = [
            nn.Linear(in_features=in_dim, out_features=latent_dim),
            nn.GroupNorm(4, latent_dim),
            nn.PReLU(),
            nn.Linear(in_features=latent_dim, out_features=latent_dim),
            nn.GroupNorm(4, latent_dim),
            nn.PReLU(),
            nn.Linear(in_features=latent_dim, out_features=1),
        ]
        self.linear_out = nn.Sequential(*layers)

    def forward(self, x):
        logits = self.linear_out(x)
        return logits.squeeze(dim=1)


def cosine_beta_schedule(timesteps, s=0.008):
    """
    Cosine noise schedule, as proposed in
    https://openreview.net/forum?id=-NEXDKk8gZ

    Returns ``timesteps`` betas (float64) clipped to [0, 0.999].
    """
    grid = torch.linspace(0, timesteps, timesteps + 1, dtype=torch.float64) / timesteps
    # Squared-cosine cumulative-alpha curve, normalised so abar_0 == 1.
    abar = torch.cos((grid + s) / (1 + s) * math.pi * 0.5) ** 2
    abar = abar / abar[0]
    # beta_t = 1 - abar_t / abar_{t-1}
    betas = 1 - abar[1:] / abar[:-1]
    return torch.clip(betas, 0, 0.999)


def sigmoid_beta_schedule(timesteps, start=-3, end=3, tau=1, clamp_min=1e-5):
    """
    Sigmoid noise schedule, proposed in
    https://arxiv.org/abs/2212.11972 - Figure 8

    ``clamp_min`` is accepted for interface compatibility but unused.
    Returns ``timesteps`` betas (float64) clipped to [0, 0.999].
    """
    grid = torch.linspace(0, timesteps, timesteps + 1, dtype=torch.float64) / timesteps
    v_start = torch.tensor(start / tau).sigmoid()
    v_end = torch.tensor(end / tau).sigmoid()
    # Flipped, rescaled sigmoid over the warped time axis gives abar.
    abar = (v_end - ((grid * (end - start) + start) / tau).sigmoid()) / (v_end - v_start)
    abar = abar / abar[0]
    betas = 1 - abar[1:] / abar[:-1]
    return torch.clip(betas, 0, 0.999)


def ddpm_schedules(beta1, beta2, T):
    """
    Precompute DDPM forward-process coefficients for timesteps 0..T.

    ``beta1``/``beta2`` are retained for interface compatibility; the
    cosine schedule below is what actually defines the betas.

    Returns a dict with:
      "sqrtab":  sqrt(abar_t)      — signal scaling
      "sqrtmab": sqrt(1 - abar_t)  — noise scaling
    """
    beta_t = cosine_beta_schedule(T, s=0.008).float()
    alpha_t = 1 - beta_t

    # Cumulative product computed in log space: exp(cumsum(log(alpha))).
    alphabar_t = torch.cumsum(torch.log(alpha_t), dim=0).exp()

    return {
        "sqrtab": torch.sqrt(alphabar_t),
        "sqrtmab": torch.sqrt(1 - alphabar_t),
    }


class DDPM(nn.Module):
    """Wraps a denoising network with the DDPM forward (noising) process.

    Each call samples a random timestep per example, noises the input via
    q(x_t | x_0), and runs the model on (x_t, t / n_T).
    """

    def __init__(self, nn_model, betas, n_T, device):
        super(DDPM, self).__init__()
        self.nn_model = nn_model.to(device)

        # Buffers ("sqrtab", "sqrtmab") move with .to()/.cuda() automatically.
        schedules = ddpm_schedules(betas[0], betas[1], n_T)
        for name, tensor in schedules.items():
            self.register_buffer(name, tensor)

        self.n_T = n_T
        self.device = device

    def forward(self, x):
        # t ~ Uniform{1, ..., n_T - 1}, one timestep per batch element.
        _ts = torch.randint(1, self.n_T, (x.shape[0],)).to(self.device)
        noise = torch.randn_like(x)  # eps ~ N(0, 1)

        # Keep the timesteps on the same device as the data.
        _ts = _ts.to(x.device)

        # q(x_t | x_0) = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps
        x_t = self.sqrtab[_ts, None, None] * x + self.sqrtmab[_ts, None, None] * noise
        times = _ts / self.n_T
        output, down, up = self.nn_model(x_t, times)
        return output, down, up, noise, times
