import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt

# Resize MNIST 28x28 -> 32x32 (matches the conv net's power-of-two pipeline),
# then scale pixels to [-1, 1] via Normalize(mean=0.5, std=0.5) to match the
# generator's Tanh output range.
transform = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), transforms.Normalize(0.5, 0.5)])
# download=False: expects the dataset to already exist under ../dataset.
train_dataset = datasets.MNIST(root='../dataset', train=True, transform=transform, download=False)
trainloader = Data.DataLoader(train_dataset, batch_size=128, shuffle=True)
test_datasets = datasets.MNIST(root='../dataset', train=False, transform=transform, download=False)
testloader = Data.DataLoader(test_datasets, batch_size=128, shuffle=False)

# Use the first CUDA device when available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('using %s' % device)

# One batch of real images (tensor only, labels dropped) for visual inspection.
orig_imgs = next(iter(trainloader))[0]
def showimg(imgs, size):
    """Show a size x size grid of images (assumes pixels normalized to [-1, 1])."""
    # Undo Normalize(0.5, 0.5): map [-1, 1] back to [0, 1] for display.
    denormed = imgs * 0.5 + 0.5
    # make_grid returns a CHW tensor; imshow expects HWC.
    grid = vutils.make_grid(denormed[:size * size].detach(), nrow=size)
    plt.style.use('dark_background')
    plt.figure(figsize=(size, size))
    plt.axis("off")
    plt.imshow(np.transpose(grid, (1, 2, 0)), cmap='gray')
    plt.show()
# showimg(orig_imgs, 10)

class Generator(nn.Module):
    """DCGAN-style generator: (N, 100, 1, 1) noise -> (N, 1, 32, 32) image in [-1, 1]."""

    def __init__(self):
        super().__init__()
        # Spatial size grows 1 -> 4 -> 8 -> 16 -> 32 across the transposed convs.
        # Final Tanh keeps outputs in [-1, 1], matching the Normalize(0.5, 0.5) data.
        layers = [
            nn.ConvTranspose2d(100, 32, 4, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 64, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 1, 4, stride=2, padding=1, bias=False),
            nn.Tanh(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, z):
        """Generate images from latent codes z of shape (N, 100, 1, 1)."""
        return self.main(z)

class Discriminator(nn.Module):
    """WGAN critic: (N, 1, 32, 32) image -> unbounded score of shape (N, 1, 1, 1).

    No final sigmoid — a Wasserstein critic outputs raw scores, not probabilities.
    """

    def __init__(self):
        super().__init__()
        # Spatial size halves at each strided conv: 32 -> 16 -> 8 -> 4 -> 1.
        stages = [
            nn.Conv2d(1, 32, 4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(32, 64, 4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 32, 4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(32, 1, 4, stride=1, padding=0, bias=False),
        ]
        self.mean = nn.Sequential(*stages)

    def forward(self, x):
        """Score a batch of images; higher means 'more real' under the critic."""
        return self.mean(x)

def train(netG, netD, epoch_num, save=False, save_name='WDCGAN_Generator_%d.pth'):
    """Train a weight-clipping WGAN on the module-level `trainloader`.

    Args:
        netG: generator mapping (N, 100, 1, 1) noise to images.
        netD: critic producing an unbounded score per image.
        epoch_num: number of passes over the training set.
        save: if True, save netG's state_dict after training.
        save_name: filename template; '%d' is filled with epoch_num.

    Returns:
        List of per-epoch average Wasserstein-distance estimates (floats).
    """
    # The WGAN paper recommends RMSprop; momentum-based optimizers can
    # destabilize the clipped critic.
    optimizerG = torch.optim.RMSprop(netG.parameters(), lr=0.0001)
    optimizerD = torch.optim.RMSprop(netD.parameters(), lr=0.0001)
    L_list = []

    print('training...')
    for epoch in range(epoch_num):
        L, count = 0.0, 0
        for i, data in enumerate(trainloader):
            # ---- critic update: maximize E[D(real)] - E[D(fake)] ----
            optimizerD.zero_grad()
            real_imgs = data[0].to(device)
            noise = torch.randn(real_imgs.size(0), 100, 1, 1, dtype=torch.float, device=device)
            fake_imgs = netG(noise)
            # Detach fakes so the critic's backward pass never touches netG.
            loss_D = torch.mean(torch.squeeze(netD(fake_imgs.detach()))) - torch.mean(torch.squeeze(netD(real_imgs)))
            # BUGFIX: accumulate the Python float, not the tensor. `L -= loss_D`
            # kept every iteration's autograd graph alive for the whole epoch
            # (memory blow-up) and made L_list store tensors instead of floats.
            L -= loss_D.item()
            loss_D.backward()
            optimizerD.step()
            # Weight clipping: crude enforcement of the critic's Lipschitz constraint.
            for p in netD.parameters():
                p.data.clamp_(-0.01, 0.01)

            # ---- generator update every other batch: maximize E[D(fake)] ----
            if i % 2 == 0:
                optimizerG.zero_grad()
                loss_G = -torch.mean(torch.squeeze(netD(fake_imgs)))
                loss_G.backward()
                optimizerG.step()
            count += 1

        # Guard against an empty loader (count would be 0).
        L /= max(count, 1)
        L_list.append(L)
        print('L after epoch %d: %.4f' % (epoch+1, L))

    if save:
        torch.save(netG.state_dict(), save_name % epoch_num)
        print('The model has been saved.')
    return L_list

# Instantiate the networks on the selected device.
netG = Generator().to(device)
netD = Discriminator().to(device)
# l = train(netG, netD, 100, save=True)
# plt.plot(l)
# plt.xlabel('epoch')
# plt.ylabel('L')
# plt.savefig('WDCGAN')

# Load pretrained generator weights (produced by the commented-out training run
# above with epoch_num=100).
# NOTE(review): torch.load without map_location may fail on a CPU-only machine
# if the checkpoint was saved from GPU tensors — confirm.
netG.load_state_dict(torch.load('./WDCGAN_Generator_100.pth'))
netG.eval()
# Sample 100 latent vectors and generate a 10x10 grid of fake digits.
noise = torch.randn(100, 100, 1, 1, device=device)
imgs = netG(noise)
imgs = imgs.cpu()
# flatten_imgs = imgs.view(-1)
# for i, pixle in enumerate(flatten_imgs):
#     if pixle<0.3: flatten_imgs[i]=0
# imgs = flatten_imgs.view(imgs.size())
showimg(imgs, 10)