import torch
import torch.nn as nn

class Discriminator(nn.Module):
  """DCGAN discriminator for 64x64 images.

  Maps N x channels_img x 64 x 64 inputs to a single probability per
  sample (N x 1 x 1 x 1) via strided convolutions and a final Sigmoid.

  Args:
    channels_img: number of channels in the input images.
    features_d: base channel width; doubled at each downsampling stage.
  """
  def __init__(self, channels_img, features_d):
    super(Discriminator, self).__init__()

    self.net = nn.Sequential(
        # N x channels_img x 64 x 64
        # Per the DCGAN paper, no BatchNorm on the discriminator's
        # input layer — normalizing the raw image statistics here
        # destabilizes training.
        nn.Conv2d(channels_img, features_d, kernel_size=4, stride=2, padding=1),
        nn.LeakyReLU(0.2),

        # N x features_d x 32 x 32
        nn.Conv2d(features_d, features_d*2, kernel_size=4, stride=2, padding=1),
        nn.BatchNorm2d(features_d*2),
        nn.LeakyReLU(0.2),

        # N x features_d*2 x 16 x 16
        nn.Conv2d(features_d*2, features_d*4, kernel_size=4, stride=2, padding=1),
        nn.BatchNorm2d(features_d*4),
        nn.LeakyReLU(0.2),

        # N x features_d*4 x 8 x 8
        nn.Conv2d(features_d*4, features_d*8, kernel_size=4, stride=2, padding=1),
        nn.BatchNorm2d(features_d*8),
        nn.LeakyReLU(0.2),

        # N x features_d*8 x 4 x 4
        # 4x4 kernel, stride 2, no padding collapses the 4x4 map to 1x1.
        nn.Conv2d(features_d*8, 1, kernel_size=4, stride=2, padding=0),

        # N x 1 x 1 x 1
        nn.Sigmoid()
    )

  def forward(self, x):
    """Return per-sample real/fake probabilities, shape N x 1 x 1 x 1."""
    return self.net(x)


class Generator(nn.Module):
  """DCGAN generator: maps N x channels_noise x 1 x 1 latent vectors to
  N x channels_img x 64 x 64 images in [-1, 1] (Tanh output).

  Args:
    channels_noise: dimensionality of the input noise vector.
    channels_img: number of channels in the generated images.
    features_g: base channel width; halved at each upsampling stage.
  """
  def __init__(self, channels_noise, channels_img, features_g):
    super(Generator, self).__init__()

    def up(in_ch, out_ch, stride, padding):
      # One upsampling stage: transposed conv + batchnorm + LeakyReLU.
      return [
          nn.ConvTranspose2d(in_ch, out_ch, kernel_size=4, stride=stride, padding=padding),
          nn.BatchNorm2d(out_ch),
          nn.LeakyReLU(0.2),
      ]

    layers = []
    layers += up(channels_noise, features_g*16, 1, 0)   # 1x1   -> 4x4
    layers += up(features_g*16, features_g*8, 2, 1)     # 4x4   -> 8x8
    layers += up(features_g*8, features_g*4, 2, 1)      # 8x8   -> 16x16
    layers += up(features_g*4, features_g*2, 2, 1)      # 16x16 -> 32x32
    layers += [
        # 32x32 -> 64x64; Tanh maps pixels into [-1, 1].
        nn.ConvTranspose2d(features_g*2, channels_img, kernel_size=4, stride=2, padding=1),
        nn.Tanh(),
    ]
    self.net = nn.Sequential(*layers)

  def forward(self, x):
    """Return generated images, shape N x channels_img x 64 x 64."""
    return self.net(x)


import torch.optim as optim
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms

from torch.utils.data import DataLoader
# from torch.utils.tensorboard import SummaryWriter

# --- Hyperparameters -------------------------------------------------------
lr_D = 0.0002
lr_G = 0.0002
batch_size = 64
image_size = 64
channels_img = 1
channels_noise = 256
num_epochs = 10

features_d = 16
features_g = 16

# Resize to 64x64 and normalize pixels to [-1, 1] to match the
# generator's Tanh output range.
custom_transforms = transforms.Compose([
  transforms.Resize(image_size),
  transforms.ToTensor(),
  transforms.Normalize((0.5,), (0.5,))
])

dataset = datasets.MNIST(root='data/', train=True, transform=custom_transforms, download=True)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

net_D = Discriminator(channels_img, features_d).to(device)
net_G = Generator(channels_noise, channels_img, features_g).to(device)

# DCGAN (Radford et al.) recommends Adam with beta1 = 0.5; the default
# beta1 = 0.9 momentum commonly causes oscillation in GAN training.
optimizator_D = optim.Adam(net_D.parameters(), lr=lr_D, betas=(0.5, 0.999))
optimizator_G = optim.Adam(net_G.parameters(), lr=lr_G, betas=(0.5, 0.999))

net_D.train()
net_G.train()

criterion = nn.BCELoss()

real_label = 1
fake_label = 0
# Fixed latent batch, reserved for visualizing generator progress with a
# consistent set of samples (e.g. via TensorBoard).
fixed_noise = torch.randn(64, channels_noise, 1, 1).to(device)

print('Start training')

step = 0
D_mean = 0.0
for epoch in range(num_epochs):
  for batch_idx, (data, targets) in enumerate(dataloader):
    ### Discriminator training: maximize log D(x) + log(1 - D(G(z)))
    data = data.to(device)
    # The final batch of an epoch may be smaller than the configured
    # batch_size; use a separate name so the config value isn't clobbered.
    cur_batch_size = data.shape[0]

    net_D.zero_grad()
    # Two-sided label smoothing: real targets 0.9 instead of 1.0.
    labels = torch.full((cur_batch_size,), 0.9, device=device)

    output = net_D(data).reshape(-1)
    loss_D_real = criterion(output, labels)

    noise = torch.randn(cur_batch_size, channels_noise, 1, 1, device=device)
    fake = net_G(noise)
    # Smoothed fake targets: 0.1 instead of 0.0.
    labels = torch.full((cur_batch_size,), 0.1, device=device)

    # detach(): the discriminator update must not backprop into G.
    output = net_D(fake.detach()).reshape(-1)
    loss_D_fake = criterion(output, labels)
    D_mean = output.mean()  # mean D score on fakes, for monitoring

    loss_D = loss_D_real + loss_D_fake
    loss_D.backward()
    optimizator_D.step()

    ### Generator training: maximize log D(G(z))
    net_G.zero_grad()
    labels = torch.ones(cur_batch_size).to(device)
    output = net_D(fake).reshape(-1)
    loss_G = criterion(output, labels)
    loss_G.backward()
    optimizator_G.step()

    if batch_idx % 150 == 0:
      print(f'Epoch {epoch}/{num_epochs}, Batch {batch_idx}/{len(dataloader)} \
       Loss D {loss_D:.4f}, Loss G: {loss_G:.4f}, D_mean: {D_mean:.4f}')

    # Periodically checkpoint the generator weights.
    if batch_idx % 500 == 0:
        torch.save(net_G.state_dict(), "./gan_model.pkl")

    step += 1