import torch
import torch.nn as nn
    
# U-Net noise-prediction network for masked (inpainting-style) diffusion
class UNet(nn.Module):
    """U-Net noise predictor conditioned on a scalar diffusion time step.

    Forward contract (see ``forward``):
        x    : (B, 3, H, W) noisy image; H and W must be divisible by 16
               (four 2x max-poolings on the encoder path).
        mask : (B, 1, H, W) region mask; concatenated with ``x`` (4 input
               channels) and multiplied into the output.
        t    : (B,) diffusion time steps (any numeric dtype; cast to float).

    Returns a (B, 3, H, W) tensor in [0, 1] (sigmoid output), zeroed
    wherever ``mask`` is zero.
    """

    def __init__(self):
        super(UNet, self).__init__()
        # Time-step embedding: raw scalar t -> 1024-d vector via a small MLP.
        # NOTE(review): this feeds t directly into a Linear layer rather than
        # using a sinusoidal embedding — presumably intentional for this model.
        self.time_mlp = nn.Sequential(
            nn.Linear(1, 1024),
            nn.SiLU(),
            nn.Linear(1024, 1024)
        )

        # Encoder (downsampling path); input is image (3ch) + mask (1ch).
        self.enc1 = self.conv_block(4, 64)
        self.enc2 = self.conv_block(64, 128)
        self.enc3 = self.conv_block(128, 256)
        self.enc4 = self.conv_block(256, 512)

        # Projects the time embedding to the bottleneck channel count.
        self.time_emb_mid = nn.Linear(1024, 1024)
        # U-Net bottleneck.
        self.bottleneck = self.conv_block(512, 1024)

        # Decoder (upsampling path); input channels include the skip connection.
        self.dec4 = self.conv_block(1024 + 512, 512)
        self.dec3 = self.conv_block(512 + 256, 256)
        self.dec2 = self.conv_block(256 + 128, 128)
        self.dec1 = self.conv_block(128 + 64, 64)

        # Output head: 1x1 conv to 3 channels, squashed to [0, 1].
        self.final_layer = nn.Sequential(
            nn.Conv2d(64, 3, kernel_size=1),
            nn.Sigmoid()
        )

    def conv_block(self, in_channels, out_channels):
        """Two (Conv3x3 -> BatchNorm -> ReLU) stages; spatial size preserved."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def UpsampleAndPadding(self, small_tensor, big_tensor):
        """Upsample ``small_tensor`` 2x (nearest) and zero-pad it on the
        bottom/right to match ``big_tensor``'s spatial size, so odd input
        sizes still align with their skip connection."""
        # Stateless functional call instead of building an nn.Upsample module
        # on every invocation (original behavior: nearest-neighbor, 2x).
        small_tensor = nn.functional.interpolate(small_tensor, scale_factor=2)

        b, c, ph, pw = small_tensor.shape
        h, w = big_tensor.shape[2:]
        # Match dtype as well as device so mixed-precision inputs don't get
        # silently upcast to float32 here.
        padding_tensor = torch.zeros(
            b, c, h, w, device=big_tensor.device, dtype=small_tensor.dtype
        )
        padding_tensor[:, :, :ph, :pw] = small_tensor
        return padding_tensor

    def forward(self, x, mask, t):
        """Predict noise for ``x`` at time ``t``, restricted to ``mask``."""
        # Time embedding from the raw (float-cast) step value.
        t_emb = self.time_mlp(t.view(-1, 1).float())

        # Encoder. Functional max-pooling avoids constructing a fresh
        # nn.MaxPool2d module per call (identical output).
        e1 = self.enc1(torch.cat([x, mask], dim=1))
        e2 = self.enc2(nn.functional.max_pool2d(e1, 2))
        e3 = self.enc3(nn.functional.max_pool2d(e2, 2))
        e4 = self.enc4(nn.functional.max_pool2d(e3, 2))

        # Bottleneck, with the time embedding added channel-wise.
        b = self.bottleneck(nn.functional.max_pool2d(e4, 2)) \
            + self.time_emb_mid(t_emb).view(-1, 1024, 1, 1)

        # Decoder with skip connections (upsample, pad to match, concat).
        d4 = self.dec4(torch.cat((self.UpsampleAndPadding(b, e4), e4), dim=1))
        d3 = self.dec3(torch.cat((self.UpsampleAndPadding(d4, e3), e3), dim=1))
        d2 = self.dec2(torch.cat((self.UpsampleAndPadding(d3, e2), e2), dim=1))
        d1 = self.dec1(torch.cat((self.UpsampleAndPadding(d2, e1), e1), dim=1))

        # Sigmoid output, zeroed outside the mask.
        out = self.final_layer(d1) * mask

        return out


# Reverse-process (ancestral) sampling
@torch.no_grad()
def reverse_diffusion(model, x_T, mask, timesteps=1000):
    """DDPM-style reverse sampling restricted to the masked region.

    Args:
        model: callable ``model(x_t, mask, t) -> predicted noise`` with the
            same shape as ``x_t``.
        x_T: starting tensor (pure noise), shape (B, C, H, W).
        mask: region to denoise; pixels where ``mask == 1`` are updated,
            all others are carried through unchanged each step.
        timesteps: number of reverse steps; a uniform beta schedule of
            ``1/timesteps`` is used.

    Returns:
        The sampled tensor after all reverse steps.
    """
    device = x_T.device
    batch_size = x_T.shape[0]

    # Start from the provided noise.
    x_t = x_T

    # Uniform beta schedule, built directly on the sampling device.
    beta_schedule = torch.full((timesteps,), 1.0 / timesteps, device=device)
    # Precompute all cumulative alpha products once. The original recomputed
    # torch.prod over a growing slice every iteration: O(T^2) total work.
    alpha_bars = torch.cumprod(1.0 - beta_schedule, dim=0)

    for time_step in reversed(range(timesteps)):
        t = torch.full((batch_size,), time_step, device=device, dtype=torch.long)
        beta_t = beta_schedule[time_step]
        alpha_t = 1. - beta_t
        alpha_bar_t = alpha_bars[time_step]

        # Predict the noise component at this step.
        predicted_noise = model(x_t, mask, t)

        # No fresh noise is injected at the final (t == 0) step.
        noise = torch.randn_like(x_t) if time_step > 0 else 0

        # Reverse update equation, applied to the masked portion only.
        x_t_mask = x_t * mask
        x_t_mask = 1 / torch.sqrt(alpha_t) * (
            x_t_mask - beta_t / torch.sqrt(1 - alpha_bar_t) * predicted_noise
        ) + torch.sqrt(beta_t) * noise

        # Carry unmasked pixels through untouched; overwrite masked ones.
        # NOTE(review): assumes a binary mask — values other than 0/1 would
        # leak scaled content through both terms; confirm against callers.
        d_mask = torch.ones_like(mask)
        d_mask[mask == 1] = 0
        x_t = x_t * d_mask + x_t_mask

    return x_t
