import torch
import numpy as np
import random
from tqdm import tqdm
from diffusion import UNet
from torchvision.transforms import transforms
import cv2

# 逆向过程采样
# Reverse (denoising) diffusion sampling: iteratively removes predicted
# noise from the masked region of x_t, leaving the unmasked region intact.
@torch.no_grad()
def reverse_diffusion(model:UNet, x_t, mask, timesteps=1000):
    """Sample an inpainted image by running the reverse diffusion chain.

    Args:
        model: noise-prediction network, invoked as ``model(x_t, mask, t)``.
        x_t: (B, C, H, W) tensor — image with noise in the masked region.
        mask: (B, 1, H, W) tensor, 1 inside the region to inpaint, 0 elsewhere.
            Values are assumed to be exactly 0 or 1 (the ``mask == 1`` test
            below relies on this) — TODO confirm against the caller.
        timesteps: number of diffusion steps; must match the schedule the
            model was trained with.

    Returns:
        The denoised tensor, same shape and device as ``x_t``.
    """
    device = x_t.device
    batch_size = x_t.shape[0]

    # Uniform beta schedule, built on the input's device so per-step
    # arithmetic never mixes CPU and GPU tensors.
    beta_schedule = torch.tensor([1/timesteps]*timesteps, device=device)
    # Precompute all cumulative products once (O(T)) instead of calling
    # torch.prod over a growing slice inside the loop (O(T^2) overall).
    # alpha_bars[t] == prod_{s<=t} (1 - beta_s), identical to the per-step
    # torch.prod(1 - beta_schedule[:t+1]).
    alpha_bars = torch.cumprod(1. - beta_schedule, dim=0)

    # Complement of the mask (1 where original pixels are kept) is
    # loop-invariant — hoist it out of the sampling loop.
    d_mask = torch.ones_like(mask)
    d_mask[mask==1] = 0

    for time_step in tqdm(reversed(range(timesteps))):
        t = torch.full((batch_size,), time_step, device=device, dtype=torch.long)
        beta_t = beta_schedule[time_step]
        alpha_t = 1. - beta_t
        alpha_bar_t = alpha_bars[time_step]

        # Predict the noise component at this step.
        predicted_noise = model(x_t, mask, t)

        # Stochastic term: add fresh Gaussian noise (restricted to the
        # masked region) at every step except the final one.
        if time_step > 0:
            noise = torch.randn_like(x_t) * mask
        else:
            noise = 0

        # Standard DDPM reverse-update equation, applied to the masked part.
        x_t_mask = x_t * mask
        x_t_mask = 1 / torch.sqrt(alpha_t) * (
            x_t_mask - beta_t / torch.sqrt(1 - alpha_bar_t) * predicted_noise
        ) + torch.sqrt(beta_t) * noise

        # Recompose: original pixels outside the mask, denoised inside it.
        x_t = x_t * d_mask + x_t_mask

    return x_t

def prediction(model:UNet, img_path, save_path, device):
    """Inpaint a random rectangle of an image and save input/output stacked.

    A random rectangular hole (50-128 px per side) is filled with Gaussian
    noise and then reconstructed via reverse diffusion; the original image
    and the result are stacked vertically and written to ``save_path``.

    Args:
        model: trained noise-prediction network forwarded to
            ``reverse_diffusion``.
        img_path: path to the input image (read with OpenCV, BGR order).
        save_path: destination path for the comparison image.
        device: torch device on which sampling runs.

    Raises:
        FileNotFoundError: if the image at ``img_path`` cannot be read.
    """
    model.eval()
    img = cv2.imread(img_path)
    if img is None:
        # cv2.imread signals failure by returning None rather than raising;
        # fail early with a clear message instead of a later AttributeError.
        raise FileNotFoundError(f"Could not read image: {img_path}")
    h, w = img.shape[:2]

    # Pick a random rectangle to mask out; slicing below clips it to the
    # image bounds automatically if it overruns.
    rand_x = random.randint(0, max(0, w-128-1))
    rand_y = random.randint(0, max(0, h-128-1))
    rand_w = random.randint(50, 128)
    rand_h = random.randint(50, 128)
    mask = np.zeros([img.shape[0], img.shape[1], 1], dtype=np.uint8)
    mask[rand_y:rand_y+rand_h, rand_x:rand_x+rand_w, :] = 255

    x0_img = transforms.ToTensor()(img)   # (3, H, W), scaled to [0, 1]
    mask = transforms.ToTensor()(mask)    # (1, H, W), 1 inside the hole
    epsilon = torch.randn_like(x0_img)
    x0_img = x0_img.unsqueeze(0)
    mask = mask.unsqueeze(0)
    epsilon = epsilon.unsqueeze(0)

    # Replace the masked region with pure Gaussian noise; keep the rest.
    d_mask = torch.ones_like(mask)
    d_mask[mask==1] = 0
    xt_img = x0_img * d_mask.repeat(1, 3, 1, 1) + epsilon * mask

    # Reverse-diffusion sampling fills in the masked region.
    out = reverse_diffusion(model, xt_img.to(device), mask.to(device))

    out = torch.squeeze(out)
    img2 = out.to('cpu')
    img2 = img2.numpy()
    # Clamp before scaling: sampled values can fall outside [0, 1], and
    # casting such values to uint8 wraps around, producing speckle artifacts.
    img2 = (np.clip(img2, 0.0, 1.0) * 255).astype(np.uint8)
    img2 = np.transpose(img2, (1, 2, 0))
    # img2 = cv2.cvtColor(img2, cv2.COLOR_RGB2BGR)

    # Stack input (top) and output (bottom) on a common canvas size.
    h, w = img.shape[:2]
    h2, w2 = img2.shape[:2]
    m1 = np.zeros((max(h, h2), max(w, w2), 3), dtype=np.uint8)
    m2 = np.zeros((max(h, h2), max(w, w2), 3), dtype=np.uint8)
    m1[:h, :w, :] = img
    m2[:h2, :w2, :] = img2
    cat_img = np.vstack((m1, m2))

    cv2.imwrite(save_path, cat_img)

