# -*- coding: utf-8 -*-
"""
# @FileName:     train.py
# @AuthorName:   Sanqi Lu (Lingwei Dang)
# @Institution:  SCUT, Guangzhou, China
# @EmailAddress: lenvondang@163.com
# @CreateTime:   2024/12/22 12:36
"""
import sys
sys.path.append("")
import torch
import torch.nn as nn
from ddpm.epsilon_Prediction.ddpm_epsilon import DDPM_Epsilon
import cv2
import numpy as np
import einops
from tqdm import tqdm

from cfgs.ddpm_cfgs import unet_res_cfg
from datas.get_dataloader import get_dataloader
from ddpm.models.net import build_network

# Training hyper-parameters shared by train() below.
batch_size = 512  # samples per optimizer step
n_epochs = 100  # full passes over the dataset

def loss_fn(pred_x, x, loss_weight):
    """Weighted mean-squared error between prediction and target.

    Each element's squared error is scaled by ``loss_weight`` (with the usual
    broadcasting rules) before averaging over all elements.
    """
    squared_error = (pred_x - x) ** 2
    return (squared_error * loss_weight).mean()

def train(ddpm: DDPM_Epsilon, net, device, ckpt_path=None, save_dir=""):
    """Train the noise-prediction network with the weighted DDPM objective.

    Args:
        ddpm: DDPM wrapper providing ``n_steps`` (the T of the formulation)
            and ``sample_forward`` for the forward diffusion.
        net: a ``torch.nn.Module`` predicting the noise eps from (x_t, t).
        ckpt_path: optional path to a state_dict to resume training from.
        save_dir: directory where periodic checkpoints are written.
    """
    import os  # local import keeps this fix self-contained

    # n_steps is the T of the diffusion formulation.
    n_steps = ddpm.n_steps
    dataloader = get_dataloader(batch_size)
    net = net.to(device)
    # Bug fix: ckpt_path was previously accepted but silently ignored, so
    # resuming from a checkpoint never worked.
    if ckpt_path is not None:
        net.load_state_dict(torch.load(ckpt_path, map_location=device))
    optimizer = torch.optim.Adam(net.parameters(), 1e-3)
    # Ensure the checkpoint saves below do not fail on a missing directory.
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)

    for e in tqdm(range(n_epochs), total=n_epochs):
        for x, _ in dataloader:  # x: a batch of images, e.g. [512, 1, 128, 128]
            current_batch_size = x.shape[0]
            x = x.to(device)
            # Sample one diffusion timestep uniformly per example.
            t = torch.randint(0, n_steps, (current_batch_size, )).to(device)
            eps = torch.randn_like(x).to(device)
            # Forward-diffuse x to x_t; alpha_t_weight is the per-sample
            # weight applied to the squared error in loss_fn.
            x_t, alpha_t_weight = ddpm.sample_forward(x, t, eps)
            eps_theta = net(x_t, t.reshape(current_batch_size, 1))  # [b, 1, H, W]
            loss = loss_fn(eps_theta, eps, alpha_t_weight)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Log the last batch's stats and checkpoint every 10 epochs.
        if e % 10 == 0:
            print(f"alpha_t_weight: {alpha_t_weight.cpu().data.numpy().reshape(-1)}")
            print(f"t: {t.cpu().data.numpy().reshape(-1)}")
            print(f"loss: {loss.cpu().data.numpy()}")
            torch.save(net.state_dict(), f"{save_dir}/model_{e:03d}.pth")

    torch.save(net.state_dict(), f"{save_dir}/last_{e:03d}.pth")

def sample_imgs(ddpm,
                net,
                n_sample=81,
                device='cuda',
                simple_var=True,
                save_dir=""):
    """Sample an image grid from each saved checkpoint and write it as a PNG.

    Iterates over checkpoints model_090.pth, model_080.pth, ..., model_000.pth
    in ``save_dir``, draws ``n_sample`` images from each, arranges them in a
    sqrt(n_sample) x sqrt(n_sample) grid and writes img_<num>.png alongside.
    """
    for model_num in range(90, -1, -10):
        model_path = f"{save_dir}/model_{model_num:03d}.pth"
        img_save_path = f"{save_dir}/img_{model_num:03d}.png"
        # Bug fix: map_location makes loading robust when the checkpoint was
        # saved on a different device (e.g. trained on CUDA, sampled on CPU).
        net.load_state_dict(torch.load(model_path, map_location=device))
        net = net.to(device)
        net = net.eval()
        with torch.no_grad():
            # NOTE(review): 28x28 here vs. the [*, 1, 128, 128] batches noted
            # in train() — confirm the intended sampling resolution.
            shape = (n_sample, 1, 28, 28)  # 1, 3, 28, 28
            imgs = ddpm.sample_epsilon(shape,
                                        net,
                                        device=device,
                                        simple_var=simple_var).detach().cpu()
            # Map from [-1, 1] to [0, 255] and clamp before uint8 conversion.
            imgs = (imgs + 1) / 2 * 255
            imgs = imgs.clamp(0, 255)
            # (b1*b2, c, h, w) -> one (b1*h, b2*w, c) tiled grid image.
            imgs = einops.rearrange(imgs,
                                    '(b1 b2) c h w -> (b1 h) (b2 w) c',
                                    b1=int(n_sample**0.5))

            imgs = imgs.numpy().astype(np.uint8)

            cv2.imwrite(img_save_path, imgs)

if __name__ == '__main__':
    # Experiment configuration.
    n_steps = 1000  # number of diffusion timesteps T
    device = 'cuda:0'
    save_dir = "outs/ddpm_epsilon"
    config = unet_res_cfg  # network architecture configuration
    # Bug fix: removed the dead `config_id = 4` variable — it was never read;
    # `config` is hard-wired to unet_res_cfg above. Reintroduce an id-based
    # lookup if selecting among multiple configs becomes necessary.
    net = build_network(config, n_steps)
    ddpm = DDPM_Epsilon(device, n_steps)

    train(ddpm, net, device=device, ckpt_path=None, save_dir=save_dir)

    sample_imgs(ddpm, net, device=device, save_dir=save_dir)
