import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.transforms import transforms

from torchvision import datasets

import numpy as np
import matplotlib.pyplot as plt

# Wall-clock reference for the total-runtime report printed at the end of the script.
start_time = time.time()
# MNIST images are single-channel; render them in grayscale by default.
plt.rcParams['image.cmap'] = 'gray'

class discriminator(nn.Module):
    """Conditional discriminator for 28x28 MNIST images.

    Scores a flattened image as real/fake in (0, 1), conditioned on its
    class label: the label indexes a learned embedding vector that is
    multiplied element-wise into the image before scoring.
    """

    def __init__(self, img_size=784, enbeding_size=10):
        super(discriminator, self).__init__()
        # Fully-connected scorer: img_size -> 512 -> 512 -> 1 probability.
        self.main = nn.Sequential(
            nn.Linear(img_size, 512),
            nn.LeakyReLU(0.2),
            nn.Linear(512, 512),
            nn.LeakyReLU(0.2),
            nn.Linear(512, 1),
            nn.Sigmoid()
        )
        # One img_size-dim conditioning vector per class label.
        self.embedding = nn.Embedding(enbeding_size, img_size)

    def forward(self, input, y, labels=None):
        # Labels may arrive as (batch,) or (batch, 1); flatten either way.
        flat_labels = y.view(-1)
        conditioned = input * self.embedding(flat_labels)
        return self.main(conditioned)


class generator(nn.Module):
    """Conditional generator for 28x28 MNIST images.

    Maps label-conditioned noise to a flattened image in [-1, 1] (Tanh
    output): the label indexes a learned embedding vector that is
    multiplied element-wise into the noise before synthesis.
    """

    def __init__(self, noisy_size=256, img_size=784, enbeding_size=10):
        super(generator, self).__init__()
        # Fully-connected synthesizer: noisy_size -> 1024 -> 1024 -> img_size.
        self.main = nn.Sequential(
            nn.Linear(noisy_size, 1024),
            nn.ReLU(),
            nn.Linear(1024, 1024),
            nn.ReLU(),
            nn.Linear(1024, img_size),
            nn.Tanh()
        )
        # One noisy_size-dim conditioning vector per class label.
        self.embedding = nn.Embedding(enbeding_size, noisy_size)

    def forward(self, input, y, labels=None):
        # Labels may arrive as (batch,) or (batch, 1); flatten either way.
        flat_labels = y.view(-1)
        conditioned = input * self.embedding(flat_labels)
        return self.main(conditioned)


def show_images(images):
    """Tile a batch of flattened 28x28 images onto a square subplot grid.

    Grid side length is ceil(sqrt(batch)); images are drawn in the
    figure's current colormap (set to 'gray' at module level).
    """
    grid = int(np.ceil(np.sqrt(images.shape[0])))
    for slot, img in enumerate(images, start=1):
        plt.subplot(grid, grid, slot)
        plt.imshow(img.reshape(28, 28))


# Discriminator Loss => BCELoss
def d_loss_function(inputs, targets):
    """Mean binary cross-entropy between D's outputs and real/fake targets."""
    criterion = nn.BCELoss()
    return criterion(inputs, targets)


def g_loss_function(inputs):
    """Generator loss: BCE pushing D's scores on fake images toward 1 ("real").

    The all-ones target is created directly on `inputs`'s device, so the
    function no longer depends on the module-level `device` global (and
    cannot hit a device mismatch if `inputs` lives elsewhere).
    """
    targets = torch.ones(inputs.shape[0], 1, device=inputs.device)
    return nn.BCELoss()(inputs, targets)


# GPU
# Pick the first CUDA device when available; everything below moves to it.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print('GPU State:', device)

# Model
# NOTE: instantiation order matters for reproducibility — weight init
# consumes the global RNG stream.
G = generator().to(device)
D = discriminator().to(device)
print(G)
print(D)


# Settings
# Standard GAN Adam hyperparameters (lr=2e-4, beta1=0.5).
epochs = 200
lr = 0.0002
batch_size = 64
g_optimizer = optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999))
d_optimizer = optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999))

# Transform
# Scale pixels from [0, 1] to [-1, 1] to match the generator's Tanh output.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])


# Load data
# Downloads MNIST into data/ on first run (network side effect).
train_set = datasets.MNIST('data/', train=True, download=True, transform=transform)
test_set = datasets.MNIST('data/', train=False, download=True, transform=transform)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)


# Train
for epoch in range(1, epochs + 1):
    d_mean = 0.0   # running sum of D(fake) batch means since the last report
    d_count = 0    # number of batches folded into d_mean
    for times, data in enumerate(train_loader, start=1):
        real_inputs = data[0].to(device)
        img_label = data[1].to(device)
        real_inputs = real_inputs.view(-1, 784)
        real_outputs = D(real_inputs, img_label)
        real_label = torch.ones(real_inputs.shape[0], 1).to(device)

        # Uniform noise in [-1, 1] matching the generator's 256-dim input.
        noise = (torch.rand(real_inputs.shape[0], 256) - 0.5) / 0.5
        noise = noise.to(device)
        fake_inputs = G(noise, img_label)
        # Detach: the D update must not backprop into G. (The original
        # computed these gradients and then discarded them via
        # g_optimizer.zero_grad(); detaching is trajectory-identical
        # and avoids the wasted backward pass through G.)
        fake_outputs = D(fake_inputs.detach(), img_label)
        # .item() detaches the scalar; accumulating the tensor itself kept
        # every batch's autograd graph alive between reports (memory leak).
        d_mean += fake_outputs.mean().item()
        d_count += 1
        fake_label = torch.zeros(fake_inputs.shape[0], 1).to(device)

        outputs = torch.cat((real_outputs, fake_outputs), 0)
        targets = torch.cat((real_label, fake_label), 0)

        # Discriminator update: real -> 1, fake -> 0.
        d_optimizer.zero_grad()
        d_loss = d_loss_function(outputs, targets)
        d_loss.backward()
        d_optimizer.step()

        # Generator update: fresh noise, push D(fake) toward 1.
        noise = (torch.rand(real_inputs.shape[0], 256) - 0.5) / 0.5
        noise = noise.to(device)

        fake_inputs = G(noise, img_label)
        fake_outputs = D(fake_inputs, img_label)

        g_loss = g_loss_function(fake_outputs)
        g_optimizer.zero_grad()
        g_loss.backward()
        g_optimizer.step()

        if times % 100 == 0 or times == len(train_loader):
            # Divide by the actual window size — the final window of each
            # epoch is usually shorter than 100 batches, so the original
            # fixed /100 under-reported the mean there.
            print('[{}/{}, {}/{}] D_loss: {:.3f}  G_loss: {:.3f} D_mean: {:.3f}'.format(
                epoch, epochs, times, len(train_loader),
                d_loss.item(), g_loss.item(), d_mean / max(d_count, 1)))
            d_mean = 0.0
            d_count = 0

    # Periodic generator checkpoint. The original also `break`-ed here,
    # aborting training at epoch 50 despite epochs=200 (leftover debug?);
    # removed so all epochs run and a checkpoint lands every 50 epochs.
    if epoch % 50 == 0:
        torch.save(G.state_dict(), 'cgan_epoch_{}.pkl'.format(epoch))
        print('Model saved.')


print('Training Finished.')
print('Cost Time: {}s'.format(time.time() - start_time))