from pathlib import Path
from torch.utils.data import DataLoader
import torch
from torch import optim, nn
from gdatasets.cifar import CIFARDataset
from gdatasets import beta_schedule, alpha_bar
from gdatasets.cifar_diffusion import DiffusionDataset
from stable_diffusion.ddpm import DDPM

# --- Training setup: device, diffusion schedule, data, model, optimizer ---

# Prefer the GPU when one is available; otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Root directory of the extracted CIFAR-10 python batches.
base_path = Path(r"D:\datasets\image\cifar-10-batches-py")

# Diffusion schedule: number of timesteps, per-step betas, cumulative alphas.
step_num = 1000
beta = beta_schedule(step_num)
alpha_bar_val = alpha_bar(beta)

# Raw CIFAR splits wrapped in diffusion datasets
# (presumably these yield (noisy_image, timestep, noise) tuples — see loop below).
set_train = CIFARDataset(base_path, mode="train")
set_test = CIFARDataset(base_path, mode="test")
sets = {
    "train": DiffusionDataset(set_train, alpha_bar_val, step_num),
    "test": DiffusionDataset(set_test, alpha_bar_val, step_num),
}

# One shuffled loader per split.
loaders = {
    split: DataLoader(dataset, batch_size=128, shuffle=True)
    for split, dataset in sets.items()
}

# Noise-prediction model, MSE objective, Adagrad optimizer.
model = DDPM().to(device)
loss_fn = nn.MSELoss()
optimizer = optim.Adagrad(model.parameters(), lr=1e-4, weight_decay=1e-5)

epoch_num = 1000


best_loss = float("inf")  # best accumulated test-split loss seen so far
for i in range(epoch_num):

    for mode in loaders:
        # Put BatchNorm/Dropout layers in the right mode. The original
        # never called train()/eval(), so evaluation batches ran with
        # training-time statistics.
        model.train(mode == "train")

        loss_val = 0.0
        # Disable autograd during evaluation to save memory and compute.
        with torch.set_grad_enabled(mode == "train"):

            for img, t, labels in loaders[mode]:
                img = img.to(device)
                t = t.to(device)
                labels = labels.to(device)

                # Predict the noise added at timestep t; labels is the
                # regression target (presumably the true noise — standard
                # DDPM objective, confirm against DiffusionDataset).
                outputs = model(img, t)
                loss = loss_fn(outputs, labels)

                loss_val += loss.item()

                if mode == "train":
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

        # loss_fn (MSELoss) already averages within each batch, so the
        # mean epoch loss divides by the batch count, not the dataset
        # size (the original divided by len(sets[mode]), under-reporting
        # the loss by roughly the batch size).
        print("mode:{} epoch:[{}/{}] lossVal:{:.4f}".format(
            mode, i + 1, epoch_num, loss_val / len(loaders[mode])))

        # Checkpoint only on the held-out split. The original compared
        # both train and test sums against one shared best value, so the
        # saved model tracked whichever split happened to be smaller.
        if mode == "test" and loss_val < best_loss:
            best_loss = loss_val
            torch.save(model.state_dict(), "./best_model")