import torch
import torch.utils.data as torchdata
from data.anim_face import Dataset
import model.dcgan as model
from model.dcgan_opts import opts
from vis.visualization import GANVisualization
import os


def save_model(g, d, stamp: str):
    """Persist generator and discriminator weights to checkpoints/dcgan.

    Args:
        g: generator module whose state_dict is saved.
        d: discriminator module whose state_dict is saved.
        stamp: suffix identifying the checkpoint (e.g. "epoch-iter").
    """
    output_dir = 'checkpoints/dcgan'
    # exist_ok avoids the check-then-create race of isdir + makedirs.
    os.makedirs(output_dir, exist_ok=True)
    torch.save(g.state_dict(), os.path.join(output_dir, 'generator_{}.pth'.format(stamp)))
    torch.save(d.state_dict(), os.path.join(output_dir, 'discriminator_{}.pth'.format(stamp)))
    

def train(opts):
    """Train a DCGAN on the anime-face dataset.

    Alternates one discriminator update and one generator update per batch,
    logging losses every 10 steps, sample grids every 200 steps, and saving
    checkpoints every 1000 steps.

    Args:
        opts: options namespace; reads img_size, batch_size, device, epochs.
    """
    img_size = opts.img_size
    dataset = Dataset(output_height=img_size, output_width=img_size)
    dataloader = torchdata.DataLoader(dataset, opts.batch_size, shuffle=True, drop_last=True)
    device = opts.device
    batch_size = opts.batch_size
    noise_func = model.Noise(n=batch_size, len=64, device=device)
    # Separate fixed-size noise source for the 6x6 visualization grid.
    noise_func_vis = model.Noise(n=36, len=64, device=device)
    g = model.Generator(img_size=img_size).to(device)
    d = model.Discriminator(img_size=img_size).to(device)
    # lr=2e-4, betas=(0.5, 0.999): the settings from the DCGAN paper.
    optimizer_g = torch.optim.Adam(g.parameters(), lr=.0002, betas=(0.5, 0.999))
    optimizer_d = torch.optim.Adam(d.parameters(), lr=.0002, betas=(0.5, 0.999))
    loss_func = torch.nn.BCELoss()
    vis = GANVisualization('dcgan')

    # Loop-invariant label tensors — allocate once instead of every batch.
    real_labels = torch.ones(batch_size, 1, device=device)
    fake_labels = torch.zeros(batch_size, 1, device=device)

    for epoch in range(opts.epochs):
        for i, tensor_image in enumerate(dataloader):

            # ---- train discriminator ----
            tensor_image = tensor_image.to(device)
            x = noise_func()
            # detach() blocks gradients from flowing into the generator here;
            # otherwise loss_d.backward() wastes work filling g's grads,
            # which would then just be zeroed before the generator step.
            output_gen = d(g(x).detach())
            output_real = d(tensor_image)
            loss_d = loss_func(output_gen, fake_labels) + \
                loss_func(output_real, real_labels)
            optimizer_d.zero_grad()
            loss_d.backward()
            optimizer_d.step()

            # ---- train generator ----
            x = noise_func()
            output_gen = d(g(x))
            # Non-saturating generator loss: push d(g(x)) toward "real".
            loss_g = loss_func(output_gen, real_labels)
            optimizer_g.zero_grad()
            loss_g.backward()
            optimizer_g.step()

            # ---- visualization and checkpointing ----
            if i % 10 == 0:
                print('epoch: {}; i: {};'.format(epoch, i))
                vis.vis_loss(loss_g.item(), loss_d.item())
            if i % 200 == 0:
                vis.vis_samples(g, d, noise_func_vis, caption='epoch: {}; i: {};'.format(epoch, i))
            if i % 1000 == 0:
                save_model(g, d, '{}-{}'.format(epoch, i))


if __name__ == '__main__':
    # Entry point: train with the default DCGAN options from model.dcgan_opts.
    train(opts)
