''' 
This script does conditional latent generation using a diffusion model

This code is modified from,
https://github.com/cloneofsimo/minDiffusion
and
https://github.com/TeaPearce/Conditional_Diffusion_MNIST

Diffusion model is based on DDPM,
https://arxiv.org/abs/2006.11239

The conditioning idea is taken from 'Classifier-Free Diffusion Guidance',
https://arxiv.org/abs/2207.12598

This technique also features in Imagen, 'Photorealistic Text-to-Image Diffusion Models with Deep Language Understanding',
https://arxiv.org/abs/2205.11487

'''
import math
import os

import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from dataset import SinWaveDataset
from network import DDPM, FCNN

def main():
    """Train a conditional DDPM on sine-wave data and checkpoint it.

    Runs a fixed-epoch training loop with linear learning-rate decay,
    tracks an exponential moving average (EMA) of the loss for display,
    redraws the log10 loss curve after every epoch, and saves the model
    after each epoch.
    """
    device = torch.device(
        "cuda") if torch.cuda.is_available() else torch.device("cpu")

    # Hard-coded training hyper-parameters.
    n_epoch = 5000
    batch_size = 1024
    n_T = 1000          # number of diffusion timesteps
    lrate = 1e-3

    # Network / embedding dimensions.
    x_size = 128
    time_embed_size = 64
    condition_size = 3
    condition_embed_size = 64

    ddpm = DDPM(nn_model=FCNN(hidden_sizes=[1000, 1000, 1000, 1000],
                              x_size=x_size,
                              time_embed_size=time_embed_size,
                              condition_size=condition_size,
                              condition_embed_size=condition_embed_size),
                betas=(1e-4, 0.02),
                n_T=n_T,
                device=device,
                drop_prob=0.05)
    ddpm.to(device)

    # Create dataset: 10000 sine-wave samples of length 128 each
    # (presumably matching x_size — TODO confirm against SinWaveDataset).
    train_dataset = SinWaveDataset(10000, 128)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size,
                                  shuffle=True, num_workers=5)

    optim = torch.optim.Adam(ddpm.parameters(), lr=lrate)

    # Ensure the checkpoint directory exists before the first save;
    # torch.save does not create parent directories.
    save_dir = "DDPM/SineTest"
    os.makedirs(save_dir, exist_ok=True)

    losses = []  # per-step log10 of the EMA loss, for plotting

    for ep in range(n_epoch):
        print(f'epoch {ep}')
        ddpm.train()

        # Linear learning-rate decay down to ~0 over the full run.
        optim.param_groups[0]['lr'] = lrate * (1 - ep / n_epoch)

        pbar = tqdm(train_dataloader)
        loss_ema = None
        for x, c in pbar:
            optim.zero_grad()
            x = x.to(device)
            c = c.to(device)

            loss = ddpm(x, c)
            loss.backward()

            # EMA of the loss gives a stable progress-bar readout.
            if loss_ema is None:
                loss_ema = loss.item()
            else:
                loss_ema = 0.95 * loss_ema + 0.05 * loss.item()
            pbar.set_description(f"loss: {loss_ema:.4f}")
            optim.step()
            # log10 directly instead of log(x)/log(10).
            losses.append(math.log10(loss_ema))

        # Redraw the loss curve after each epoch (non-blocking).
        plt.clf()
        plt.plot(losses)
        plt.xlabel('Steps')
        plt.ylabel('log10(EMA Loss)')
        plt.title('Training Loss Curve')
        plt.pause(0.001)

        # NOTE(review): saving the whole module pickles class paths; a
        # state_dict checkpoint would be more portable, but the full-model
        # format is kept so existing torch.load callers keep working.
        torch.save(ddpm, os.path.join(save_dir, "ddpm.pth"))
        print('model saved')



# Run training only when executed as a script, not on import.
if __name__ == "__main__":
    main()