import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt

# Data pipeline: MNIST digits as float tensors in [0, 1] (ToTensor only).
# NOTE(review): the generator below ends in Tanh, whose outputs lie in [-1, 1];
# real images are never normalized to that range — consider adding
# Normalize((0.5,), (0.5,)) so real and fake share a range. TODO confirm.
transform = transforms.Compose([transforms.ToTensor()])
train_dataset = datasets.MNIST(root='../dataset', train=True, transform=transform, download=False)
trainloader = Data.DataLoader(train_dataset, batch_size=128, shuffle=True)
# Test split is loaded but unused in the visible code; kept for parity.
test_datasets = datasets.MNIST(root='../dataset', train=False, transform=transform, download=False)
testloader = Data.DataLoader(test_datasets, batch_size=128, shuffle=False)

# Prefer the first CUDA device when available, else fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('using %s' % device)

# One batch of real images, kept around for eyeballing the data with showimg.
orig_imgs = next(iter(trainloader))[0]
def showimg(imgs, size):
    """Render the first size**2 images of `imgs` as a size x size grid.

    `imgs` is a (N, C, H, W) tensor; values are min-max normalized by
    make_grid before display. Blocks until the matplotlib window closes.
    """
    plt.style.use('dark_background')
    plt.figure(figsize=(size, size))
    plt.axis("off")
    # Detach so tensors that still carry gradients can be displayed.
    grid = vutils.make_grid(imgs[:size ** 2].detach(), nrow=size, normalize=True)
    # make_grid returns (C, H, W); imshow wants (H, W, C).
    plt.imshow(np.transpose(grid, (1, 2, 0)), cmap='gray')
    plt.show()
# showimg(orig_imgs, 10)

class Generator(nn.Module):
    """DCGAN generator.

    Maps latent noise of shape (N, 100, 1, 1) to a single-channel
    28x28 image whose values lie in [-1, 1] (Tanh output).
    """

    def __init__(self):
        super().__init__()
        # Spatial progression of the feature maps: 1 -> 4 -> 7 -> 14 -> 28.
        blocks = [
            nn.ConvTranspose2d(100, 32, 4, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 64, 4, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 1, 4, stride=2, padding=1, bias=False),
            nn.Tanh(),
        ]
        # Attribute name 'main' is kept so saved state_dicts remain loadable.
        self.main = nn.Sequential(*blocks)

    def forward(self, z):
        """Return a batch of generated images for latent batch `z`."""
        return self.main(z)

class Discriminator(nn.Module):
    """DCGAN discriminator: scores (N, 1, 28, 28) images with a real-probability in (0, 1)."""

    def __init__(self):
        super().__init__()
        # Spatial progression: 28 -> 14 -> 7 -> 4 -> 1 (final average pool).
        features = [
            nn.Conv2d(1, 32, 4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(32, 64, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 32, 4, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(0.2, inplace=True),
            nn.AvgPool2d(4),
        ]
        # Attribute names 'conv' and 'fc' are kept for state_dict compatibility.
        self.conv = nn.Sequential(*features)
        self.fc = nn.Sequential(nn.Linear(32, 1), nn.Sigmoid())

    def forward(self, x):
        """Return a (N, 1) tensor of real-image probabilities for batch `x`."""
        feats = self.conv(x)
        # Pooled features are (N, 32, 1, 1); flatten for the linear head.
        return self.fc(feats.view(-1, 32))

def train(netD, netG, num_epoch, save=False, save_name='DCGAN_Generator_%d.pth'):
    """Run the DCGAN training loop over `trainloader` for `num_epoch` epochs.

    Args:
        netD: discriminator network, already moved to `device`.
        netG: generator network, already moved to `device`.
        num_epoch: number of passes over the training data.
        save: when True, save the generator's state_dict after training.
        save_name: %-style filename template filled with `num_epoch`.

    Returns:
        (Dx_l, D_Gz_l): per-epoch averages of D(x) on real batches and of
        D(G(z)) on fake batches (measured before the discriminator update).
    """
    criterion = nn.BCELoss()
    # NOTE(review): Adam defaults (lr=1e-3, betas=(0.9, 0.999)) are kept for
    # backward compatibility; the DCGAN paper recommends lr=2e-4,
    # betas=(0.5, 0.999) — consider switching when retraining.
    optimizerD = torch.optim.Adam(netD.parameters())
    optimizerG = torch.optim.Adam(netG.parameters())
    Dx_l, D_Gz_l = [], []

    print('training...')
    for epoch in range(num_epoch):
        epoch_Dx, epoch_D_Gz, count = 0, 0, 0
        for data in trainloader:
            # ---- Discriminator step: maximize log D(x) + log(1 - D(G(z))) ----
            optimizerD.zero_grad()
            real_img = data[0].to(device)
            b_size = real_img.size(0)
            real_labels = torch.full((b_size,), 1, dtype=torch.float, device=device)
            fake_labels = torch.full((b_size,), 0, dtype=torch.float, device=device)
            outputs = netD(real_img).view(-1)
            loss_d_real = criterion(outputs, real_labels)
            epoch_Dx += outputs.mean().item()
            noise = torch.randn(b_size, 100, 1, 1, device=device)
            fake_img = netG(noise)
            # detach() so this backward pass leaves the generator untouched.
            outputs = netD(fake_img.detach()).view(-1)
            loss_d_fake = criterion(outputs, fake_labels)
            epoch_D_Gz += outputs.mean().item()
            loss_d = loss_d_real + loss_d_fake
            loss_d.backward()
            optimizerD.step()

            # ---- Generator step: maximize log D(G(z)) via real labels ----
            optimizerG.zero_grad()
            outputs = netD(fake_img).view(-1)
            loss_g = criterion(outputs, real_labels)
            # BUG FIX: D(G(z)) was previously accumulated a second time here
            # while `count` advanced only once per batch, inflating the
            # reported epoch average by roughly 2x relative to D(x).
            loss_g.backward()
            optimizerG.step()

            count += 1
        epoch_Dx /= count
        epoch_D_Gz /= count
        Dx_l.append(epoch_Dx)
        D_Gz_l.append(epoch_D_Gz)
        print('epoch %d: D(x): %.4f  D(G(z)): %.4f' % (epoch + 1, epoch_Dx, epoch_D_Gz))

    if save:
        torch.save(netG.state_dict(), save_name % num_epoch)
        print('The model has been saved.')
    return Dx_l, D_Gz_l

# Instantiate both networks on the selected device.
netG = Generator().to(device)
netD = Discriminator().to(device)
# Uncomment to train from scratch and plot the D(x)/D(G(z)) curves:
# y1, y2 = train(netD, netG, 100, save=True)
# plt.plot(y1, color='b', label='D(x)')
# plt.plot(y2, color='r', label='D(G(z))')
# plt.xlabel('epoch')
# plt.ylabel('D(x)/D(G(z))')
# plt.legend()
# plt.savefig('DCGAN')


# map_location makes a checkpoint saved on one device (e.g. GPU) loadable on
# another (e.g. a CPU-only machine); without it torch.load can fail outright.
netG.load_state_dict(torch.load('./DCGAN_Generator_100.pth', map_location=device))
netG.eval()
# Sample 100 latent vectors and render the generated digits as a 10x10 grid.
# no_grad skips building an autograd graph during pure inference.
with torch.no_grad():
    noise = torch.randn(100, 100, 1, 1, device=device)
    imgs = netG(noise).cpu()
showimg(imgs, 10)