# NOTE(review): commented-out (disabled) code, preserved byte-for-byte below.
# Sinusoidal timestep embedding in the transformer/DDPM style: maps a 1-D
# tensor of timesteps t (shape (batch,)) to a (batch, dim) encoding built
# from dim/2 sine terms concatenated with dim/2 cosine terms.
# class TimeEmbedding(nn.Module):
#     def __init__(self, dim):
#         super().__init__()
#         self.dim = dim
#         # Frequencies 10000^(-2i/dim) for i in [0, dim/2). register_buffer
#         # makes inv_freq move with the module (device/dtype) without being
#         # a trainable parameter. Requires numpy (np) in scope — confirm import.
#         inv_freq = torch.exp(torch.arange(0, dim, 2).float() * (-np.log(10000) / dim))
#         self.register_buffer("inv_freq", inv_freq)

#     def forward(self, t):
#         # Broadcast (batch, 1) * (dim/2,) -> (batch, dim/2) per branch,
#         # then concatenate along the feature axis -> (batch, dim).
#         pos_enc_a = torch.sin(t[:, None] * self.inv_freq)
#         pos_enc_b = torch.cos(t[:, None] * self.inv_freq)
#         pos_enc = torch.cat([pos_enc_a, pos_enc_b], dim=-1)
#         return pos_enc

# NOTE(review): commented-out (disabled) code, preserved byte-for-byte below.
# Conditional residual block for 1-D signals: two Conv1d+BatchNorm layers with
# a FiLM-style modulation — the conditioning vector is projected to a per-
# channel (scale, shift) pair applied to the conv output before the residual add.
# class CondResBlock(nn.Module):
#     def __init__(self, in_ch, out_ch, cond_dim):
#         super().__init__()
#         # Projects cond (batch, cond_dim) -> (batch, 2*out_ch); split later
#         # into scale and shift halves.
#         self.mlp = nn.Sequential(
#             nn.Linear(cond_dim, out_ch*2),
#             nn.GELU()
#         )

#         self.conv = nn.Sequential(
#             nn.Conv1d(in_ch, out_ch, 3, padding=1),
#             nn.BatchNorm1d(out_ch),
#             nn.GELU(),
#             nn.Conv1d(out_ch, out_ch, 3, padding=1),
#             nn.BatchNorm1d(out_ch)
#         )
#         # 1x1 conv to match channel counts on the skip path when needed.
#         self.shortcut = nn.Conv1d(in_ch, out_ch, 1) if in_ch != out_ch else nn.Identity()

#     def forward(self, x, cond):
#         # scale/shift: each (batch, out_ch); unsqueeze(-1) broadcasts over
#         # the length dimension of the (batch, out_ch, L) conv output.
#         scale, shift = torch.chunk(self.mlp(cond), 2, dim=1)
#         h = self.conv(x)
#         h = h * (1 + scale.unsqueeze(-1)) + shift.unsqueeze(-1)
#         return h + self.shortcut(x)

# NOTE(review): commented-out (disabled) code, preserved byte-for-byte below.
# 1-D U-Net denoiser: two downsampling stages (max-pool /2), a bottleneck,
# and two upsampling stages (interpolate x2) with channel-wise skip
# concatenation. Conditioning = time embedding + projected condition vector,
# both 128-d, summed and fed to every CondResBlock.
# NOTE(review): skip concatenation assumes the input length is divisible by 4
# (two pool/upsample round trips) — confirm with callers.
# class UNet1D(nn.Module):
#     def __init__(self, cond_dim=64*16):
#         super().__init__()
#         self.time_embed = TimeEmbedding(128)
#         self.cond_proj = nn.Linear(cond_dim, 128)

#         self.down1 = nn.ModuleList([
#             CondResBlock(1, 64, 128),
#             CondResBlock(64, 64, 128)
#         ])
#         self.down2 = nn.ModuleList([
#             CondResBlock(64, 128, 128),
#             CondResBlock(128, 128, 128)
#         ])
#         self.mid = CondResBlock(128, 256, 128)
#         # Up-path input channels = upsampled channels + skip channels.
#         self.up1 = nn.ModuleList([
#             CondResBlock(256+128, 128, 128),
#             CondResBlock(128, 128, 128)
#         ])
#         self.up2 = nn.ModuleList([
#             CondResBlock(128+64, 64, 128),
#             CondResBlock(64, 64, 128)
#         ])
#         self.final = nn.Conv1d(64, 1, 1)

#     def forward(self, x, t, cond):
#         # Fuse timestep and condition embeddings; `cond` is rebound from the
#         # raw condition to the fused 128-d embedding from here on.
#         t_emb = self.time_embed(t)
#         cond_emb = self.cond_proj(cond)
#         cond = t_emb + cond_emb

#         # Downsampling path (skips saved as x1, x2)
#         x1 = x
#         for block in self.down1:
#             x1 = block(x1, cond)
#         x2 = F.max_pool1d(x1, 2)

#         for block in self.down2:
#             x2 = block(x2, cond)
#         x3 = F.max_pool1d(x2, 2)

#         # Bottleneck
#         x_mid = self.mid(x3, cond)

#         # Upsampling path (F.interpolate defaults to mode='nearest')
#         x = F.interpolate(x_mid, scale_factor=2)
#         x = torch.cat([x, x2], dim=1)
#         for block in self.up1:
#             x = block(x, cond)

#         x = F.interpolate(x, scale_factor=2)
#         x = torch.cat([x, x1], dim=1)
#         for block in self.up2:
#             x = block(x, cond)

#         # 1x1 conv back to a single output channel (predicted noise).
#         return self.final(x)

# # ====================== Diffusion process ======================
# NOTE(review): commented-out (disabled) code, preserved byte-for-byte below.
# DDPM forward process with a linear beta schedule over T steps. Only the
# schedule and the noising step are visible here — the class may continue
# (e.g. a sampling loop) beyond this chunk; do not assume it is complete.
# NOTE(review): betas/alpha_bars are created on CPU — indexing with a CUDA
# tensor t would fail; confirm device handling at the call site.
# class Diffusion:
#     def __init__(self, T=1000, beta_start=1e-4, beta_end=0.02):
#         self.T = T
#         self.betas = torch.linspace(beta_start, beta_end, T)
#         self.alphas = 1 - self.betas
#         # alpha_bar[t] = prod_{s<=t} alpha[s], the cumulative signal level.
#         self.alpha_bars = torch.cumprod(self.alphas, dim=0)

#     def add_noise(self, x0, t):
#         # q(x_t | x_0): x_t = sqrt(alpha_bar_t) * x0
#         #                     + sqrt(1 - alpha_bar_t) * eps, eps ~ N(0, I).
#         # [:, None, None] broadcasts the per-sample scalar over (C, L).
#         sqrt_alpha_bar = torch.sqrt(self.alpha_bars[t][:, None, None])
#         sqrt_one_minus_alpha_bar = torch.sqrt(1 - self.alpha_bars[t][:, None, None])
#         noise = torch.randn_like(x0)
#         xt = sqrt_alpha_bar * x0 + sqrt_one_minus_alpha_bar * noise
#         # Returns both the noised sample and the noise (the training target).
#         return xt, noise