import  torch
from torch import nn, optim
from torch.nn import functional as F
from wgan import Discriminator, Generator, weights_init
import numpy as np
import os
from PIL import Image

def save_result(val_out, val_block_size, image_path, color_mode):
    """Tile a batch of generated images into a grid and save it to disk.

    Args:
        val_out: numpy array of shape (N, H, W, C) with values in [-1, 1].
        val_block_size: number of images per grid row.
        image_path: destination path for the saved grid image.
        color_mode: unused; kept for interface compatibility.

    Raises:
        ValueError: if fewer than ``val_block_size`` images are given,
            so not even one full row can be built.

    Note: only complete rows are kept — trailing images beyond the last
    full multiple of ``val_block_size`` are silently dropped.
    """
    def preprocess(img):
        # Map [-1, 1] floats to [0, 255] uint8 pixel values.
        return ((img + 1.0) * 127.5).astype(np.uint8)

    preprocessed = preprocess(val_out)

    final_image = np.array([])
    single_row = np.array([])
    for b in range(val_out.shape[0]):
        # Concatenate this image onto the current row (along width).
        if single_row.size == 0:
            single_row = preprocessed[b, :, :, :]
        else:
            single_row = np.concatenate((single_row, preprocessed[b, :, :, :]), axis=1)

        # Once the row is full, stack it onto the grid (along height).
        if (b + 1) % val_block_size == 0:
            if final_image.size == 0:
                final_image = single_row
            else:
                final_image = np.concatenate((final_image, single_row), axis=0)

            # reset single row
            single_row = np.array([])

    if final_image.size == 0:
        # Original code raised an opaque IndexError here; fail loudly instead.
        raise ValueError('need at least val_block_size images to build one grid row')

    # Drop a trailing singleton channel so PIL treats the array as grayscale.
    if final_image.shape[2] == 1:
        final_image = np.squeeze(final_image, axis=2)
    Image.fromarray(final_image).save(image_path)

# Ensure the output directory for generated-image grids exists
# (exist_ok avoids the check-then-create race of the original code).
os.makedirs('images', exist_ok=True)

# Fall back to CPU when CUDA is unavailable so the script still runs
# on machines without a GPU; behavior on GPU machines is unchanged.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
cpudevice = torch.device('cpu:0')
def main():
    """Train the GAN: one discriminator update and one generator update
    per step, logging losses and saving a 10x10 sample grid every 100 steps.

    NOTE(review): batch_x is random noise standing in for real images —
    plug in a real data loader before serious training.
    """
    z_dim = 100
    epochs = 3000000
    batch_size = 128
    learning_rate = 0.002

    generator = Generator(z_dim).to(device)
    generator.apply(weights_init)
    discriminator = Discriminator(3).to(device)
    discriminator.apply(weights_init)

    g_optimizer = optim.Adam(generator.parameters(), lr=learning_rate, betas=(0.5, 0.9))
    d_optimizer = optim.Adam(discriminator.parameters(), lr=learning_rate, betas=(0.5, 0.9))

    for epoch in range(epochs):

        batch_z = torch.rand(batch_size, z_dim).to(device)
        # Placeholder "real" batch (pure noise); replace with real data.
        batch_x = torch.randn(batch_size, 3, 64, 64).to(device)

        # Train the discriminator.
        d_loss = d_loss_fn(generator, discriminator, batch_z, batch_x)
        d_optimizer.zero_grad()
        d_loss.backward()
        d_optimizer.step()

        # Train the generator.
        g_loss = g_loss_fn(generator, discriminator, batch_z)
        g_optimizer.zero_grad()
        g_loss.backward()
        g_optimizer.step()

        if epoch % 100 == 0:
            print('epoch:', epoch, 'd_loss:', d_loss.item(), 'g_loss:', g_loss.item())

            # Sampling is inference only: no_grad avoids building an
            # autograd graph for the 100 generated preview images.
            with torch.no_grad():
                z = torch.rand(100, z_dim).to(device)
                fake_image = generator(z)
                fake_image = fake_image.permute(0, 2, 3, 1)  # to [b, 64, 64, 3]
                fake_image = fake_image.to(cpudevice)
            img_path = os.path.join('images', 'gan-%d.png' % epoch)
            save_result(fake_image.detach().numpy(), 10, img_path, color_mode='p')


def d_loss_fn(generator, discriminator, batch_z, batch_x):
    """Discriminator loss: real images scored as real, generated images
    scored as fake, plus a gradient penalty on interpolates (weight 1.0).

    NOTE(review): despite the WGAN-style gradient penalty, the base loss
    is the standard BCE GAN loss, not the Wasserstein critic loss —
    confirm this mix is intended.
    """
    # Detach the fakes: the discriminator step must not backprop into the
    # generator. D's gradients are identical; this avoids wasted compute
    # and stray gradients accumulating on generator parameters.
    fake_image = generator(batch_z).detach()
    fake_logits = discriminator(fake_image)
    real_logits = discriminator(batch_x)
    # Real samples target 1, generated samples target 0.
    real_loss = F.binary_cross_entropy_with_logits(real_logits, torch.ones_like(real_logits))
    fake_loss = F.binary_cross_entropy_with_logits(fake_logits, torch.zeros_like(fake_logits))

    gp = gradient_penalty(discriminator, batch_x, fake_image)

    # Gradient-penalty coefficient is 1.0.
    loss = real_loss + fake_loss + 1. * gp

    return loss

def g_loss_fn(generator, discriminator, batch_z):
    """Generator loss: push the discriminator toward scoring generated
    images as real (BCE against an all-ones target)."""
    generated = generator(batch_z)
    scores = discriminator(generated)
    real_target = torch.ones_like(scores)
    return F.binary_cross_entropy_with_logits(scores, real_target)

def gradient_penalty(discriminator, batch_x, fake_image):
    """WGAN-GP gradient penalty.

    Interpolates x_hat = t * real + (1 - t) * fake with a per-sample
    uniform t, and penalizes the deviation of the discriminator's
    gradient norm at x_hat from 1.

    Args:
        discriminator: callable mapping images to scores/logits.
        batch_x: real images, shape (B, C, H, W).
        fake_image: generated images, same shape as batch_x.

    Returns:
        Scalar tensor: mean squared deviation of the gradient norm from 1.
    """
    batchsz = batch_x.shape[0]
    # Allocate t on the inputs' device rather than a module-level global,
    # so the function works wherever the tensors actually live.
    t = torch.rand(batchsz, 1, 1, 1, device=batch_x.device)
    t = t.expand_as(batch_x)

    x_hat = t * batch_x + (1 - t) * fake_image
    x_hat.requires_grad_()
    d_out = discriminator(x_hat)
    # create_graph=True keeps the penalty differentiable so second-order
    # gradients flow into the discriminator update.
    grads = torch.autograd.grad(outputs=d_out, inputs=x_hat,
                                grad_outputs=torch.ones_like(d_out),
                                create_graph=True, retain_graph=True,
                                only_inputs=True)[0]

    grads = grads.view(batchsz, -1)
    gp = ((grads.norm(2, dim=1) - 1) ** 2).mean()

    return gp

# Script entry point: start training only when run directly, not on import.
if __name__ == '__main__':
    main()