import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import math
import matplotlib.pyplot as plt

batch_size = 32        # samples per training batch
epochs = 10000         # number of passes over the dataset
noise_dim = 200        # dimensionality of the generator's latent input
LOADED = 0             # flag for the (commented-out) weight-resume logic in the training loop
path = "./data/*.jpg"  # glob pattern for the training images

d_opt = keras.optimizers.RMSprop(0.0002)  # Adam gave mediocre results in testing
g_opt = keras.optimizers.RMSprop(0.0002)


class Generator(keras.Model):
    """DCGAN-style generator.

    Maps a (noise_dim,) latent vector to a 256x256x3 image whose pixels lie
    in [-1, 1] (tanh output), via a dense projection followed by six
    stride-2 transposed convolutions.
    """

    def __init__(self):
        super(Generator, self).__init__()
        # All kernels start from N(0, 0.02), the usual DCGAN initialization.
        init = keras.initializers.random_normal(mean=0.0, stddev=0.02)

        # Project the latent vector and reshape it into a 4x4x2048 feature map.
        self.dense = layers.Dense(4 * 4 * 2048, input_shape=(noise_dim,),
                                  use_bias=False,
                                  activation="relu",
                                  kernel_initializer=init)
        self.reshape = layers.Reshape((4, 4, 2048))

        # Each stride-2 transposed conv doubles spatial size: 4 -> 8 -> ... -> 128.
        self.conv1 = layers.Conv2DTranspose(filters=1024, kernel_size=5, strides=2,
                                            padding="same", kernel_initializer=init)
        self.bn1 = layers.BatchNormalization()
        self.relu1 = layers.ReLU()

        self.conv2 = layers.Conv2DTranspose(filters=512, kernel_size=5, strides=2,
                                            padding="same", kernel_initializer=init)
        self.bn2 = layers.BatchNormalization()
        self.relu2 = layers.ReLU()

        self.conv3 = layers.Conv2DTranspose(filters=256, kernel_size=5, strides=2,
                                            padding="same", kernel_initializer=init)
        self.bn3 = layers.BatchNormalization()
        self.relu3 = layers.ReLU()

        self.conv4 = layers.Conv2DTranspose(filters=128, kernel_size=5, strides=2,
                                            padding="same", kernel_initializer=init)
        self.bn4 = layers.BatchNormalization()
        self.relu4 = layers.ReLU()

        self.conv5 = layers.Conv2DTranspose(filters=64, kernel_size=5, strides=2,
                                            padding="same", kernel_initializer=init)
        self.bn5 = layers.BatchNormalization()
        self.relu5 = layers.ReLU()

        # Final upsample to 256x256x3; tanh keeps the output in [-1, 1].
        self.conv6 = layers.Conv2DTranspose(filters=3, kernel_size=5, strides=2,
                                            padding="same", kernel_initializer=init,
                                            activation="tanh")

    def call(self, input):
        """Run the latent batch through dense projection and upsampling stack."""
        x = self.reshape(self.dense(input))
        upsample_stack = ((self.conv1, self.bn1, self.relu1),
                          (self.conv2, self.bn2, self.relu2),
                          (self.conv3, self.bn3, self.relu3),
                          (self.conv4, self.bn4, self.relu4),
                          (self.conv5, self.bn5, self.relu5))
        for conv, bn, act in upsample_stack:
            x = act(bn(conv(x)))
        return self.conv6(x)


class Discriminator(keras.Model):
    """WGAN critic for 256x256x3 images.

    Six stride-2 convolutions halve the spatial size down to 4x4, then a
    final valid conv produces an unbounded per-sample score (no sigmoid, as
    the Wasserstein objective requires).
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        init = keras.initializers.random_normal(mean=0.0, stddev=0.02)

        # LayerNormalization instead of BatchNormalization: the gradient
        # penalty is defined per sample, so the critic must not mix batch
        # statistics across samples.
        self.conv1 = layers.Conv2D(input_shape=(256, 256, 3), filters=256,
                                   kernel_size=5, strides=2, padding="same",
                                   kernel_initializer=init, use_bias=False)
        self.bn1 = layers.LayerNormalization()
        self.relu1 = layers.LeakyReLU(0.2)

        self.conv2 = layers.Conv2D(filters=256, kernel_size=5, strides=2,
                                   padding="same", kernel_initializer=init,
                                   use_bias=False)
        self.bn2 = layers.LayerNormalization()
        self.relu2 = layers.LeakyReLU(0.2)

        self.conv3 = layers.Conv2D(filters=256, kernel_size=5, strides=2,
                                   padding="same", kernel_initializer=init,
                                   use_bias=False)
        self.bn3 = layers.LayerNormalization()
        self.relu3 = layers.LeakyReLU(0.2)

        self.conv4 = layers.Conv2D(filters=512, kernel_size=5, strides=2,
                                   padding="same", kernel_initializer=init,
                                   use_bias=False)
        self.bn4 = layers.LayerNormalization()
        self.relu4 = layers.LeakyReLU(0.2)

        self.conv5 = layers.Conv2D(filters=512, kernel_size=5, strides=2,
                                   padding="same", kernel_initializer=init,
                                   use_bias=False)
        self.bn5 = layers.LayerNormalization()
        self.relu5 = layers.LeakyReLU(0.2)

        self.conv6 = layers.Conv2D(filters=1024, kernel_size=5, strides=2,
                                   padding="same", kernel_initializer=init,
                                   use_bias=False)
        self.bn6 = layers.LayerNormalization()
        self.relu6 = layers.LeakyReLU(0.2)

        # Collapse the remaining 4x4 map into the critic score (valid padding).
        self.out = layers.Conv2D(filters=1, kernel_size=4, strides=2,
                                 padding="valid", kernel_initializer=init,
                                 use_bias=False)

    def call(self, input):
        """Return a (batch, k) tensor of flattened critic scores."""
        x = input
        stages = ((self.conv1, self.bn1, self.relu1),
                  (self.conv2, self.bn2, self.relu2),
                  (self.conv3, self.bn3, self.relu3),
                  (self.conv4, self.bn4, self.relu4),
                  (self.conv5, self.bn5, self.relu5),
                  (self.conv6, self.bn6, self.relu6))
        for conv, norm, act in stages:
            x = act(norm(conv(x)))
        score = self.out(x)
        return tf.reshape(score, [score.shape[0], -1])


def gradient_penalty(batch_x, fake_image):
    """WGAN-GP penalty term: E[(||grad_x D(x_hat)||_2 - 1)^2].

    x_hat is a per-sample random interpolation between real and generated
    images; the penalty pushes the critic's gradient norm toward 1,
    enforcing the 1-Lipschitz constraint. Uses the module-level critic D.
    """
    n = batch_x.shape[0]
    # One interpolation coefficient per sample, broadcast over H, W, C.
    alpha = tf.broadcast_to(tf.random.uniform([n, 1, 1, 1]), batch_x.shape)
    x_hat = alpha * batch_x + (1 - alpha) * fake_image

    with tf.GradientTape() as tape:
        tape.watch([x_hat])  # x_hat is not a Variable, so watch it explicitly
        d_hat = D(x_hat)

    grads = tape.gradient(d_hat, x_hat)
    flat = tf.reshape(grads, [n, -1])
    norms = tf.norm(flat, axis=1)
    return tf.reduce_mean((norms - 1.) ** 2)


def g_loss(noise, is_training):
    """Generator loss: maximize the critic's score on generated images
    (minimize its negation). Uses the module-level G and D."""
    fake_scores = D(G(noise, training=is_training), training=is_training)
    return -tf.reduce_mean(fake_scores)


def d_loss(train_data, noise, is_training):
    """Critic loss for WGAN-GP.

    Returns:
        loss: mean fake score - mean real score + 10 * gradient penalty.
        w: mean real score - mean fake score, an estimate of the earth
           mover's (Wasserstein) distance, useful for monitoring.
    """
    fake_image = G(noise, training=is_training)
    mean_fake = tf.reduce_mean(D(fake_image, training=is_training))
    mean_real = tf.reduce_mean(D(train_data, training=is_training))

    gp = gradient_penalty(train_data, fake_image)
    loss = mean_fake - mean_real + 10. * gp
    w = mean_real - mean_fake  # earth mover's distance estimate
    return loss, w


# Fixed latent batch so generated samples are comparable across epochs.
test_noise = tf.random.normal([4, noise_dim])
G = Generator()
D = Discriminator()


# Uncomment to build the models eagerly and print their summaries.
# G.build(input_shape=(1,200))
# D.build(input_shape=(5,256,256,3))
# G.summary()
# D.summary()


def generate_plot_image(ii, epoch):
    """Render the generator's output on the fixed test noise and save it.

    Saves "picture/<ii>.png" every call and additionally "save/<epoch>.png"
    at the start of each epoch (ii == 0).

    Args:
        ii: batch index within the epoch (used in the output filename).
        epoch: current epoch number (used for the per-epoch snapshot).
    """
    pre_images = G(test_noise, training=False)
    # Generalized from a hard-coded 2x2 grid: fit however many samples
    # test_noise produces into the smallest square grid.
    n = pre_images.shape[0]
    side = math.ceil(math.sqrt(n))
    plt.figure(figsize=(2, 2))
    for i in range(n):
        plt.subplot(side, side, i + 1)
        # Generator outputs tanh values in [-1, 1]; map to [0, 1] for imshow.
        plt.imshow((pre_images[i, :, :, :] + 1.0) / 2.0)
        plt.axis('off')
    plt.savefig("picture/" + str(ii) + ".png", dpi=800)
    if ii == 0:
        plt.savefig("save/" + str(epoch) + ".png", dpi=800)
    plt.close()


# Helper to load a single image file.
def load_image(image_file, is_train):
    """Read and decode one JPEG image from disk.

    Args:
        image_file: path (string tensor) of the image file.
        is_train: unused; kept for interface compatibility.

    Returns:
        The decoded uint8 image tensor. NOTE(review): no resizing happens
        here, so the tf.stack below assumes every image on disk already has
        identical dimensions — confirm against the dataset.
    """
    image = tf.io.read_file(image_file)
    image = tf.image.decode_jpeg(image)
    return image


# Eagerly load every matching image into one (N, H, W, C) tensor — the whole
# dataset is assumed to fit in memory.
train_dataset = tf.data.Dataset.list_files(path)
train_iter = iter(train_dataset)
train_datas2 = tf.stack([load_image(p, True) for p in train_iter], axis=0)
print('train:', train_datas2.shape)

# Re-wrap as a Dataset so the training loop can shuffle/batch per epoch.
train_datas = tf.data.Dataset.from_tensor_slices(train_datas2)

for epoch in range(epochs):
    # drop_remainder=True: a partial final batch would be smaller than the
    # fixed-size noise batch and crash gradient_penalty on a shape mismatch.
    # (The original guard `i < N / batch_size` used float division, so the
    # partial batch slipped through whenever N % batch_size != 0.)
    for i, image_batch in enumerate(train_datas.shuffle(20000).batch(batch_size, drop_remainder=True)):
        image = tf.cast(image_batch, tf.float32) / 127.5 - 1  # pixels scaled to [-1, 1]
        noise = tf.random.normal([batch_size, noise_dim])

        # Critic (discriminator) update. `w` is the Wasserstein-distance
        # estimate returned by d_loss (previously misnamed `gp`).
        with tf.GradientTape() as gt:
            Dloss, w = d_loss(image, noise, True)
        gradient_d = gt.gradient(Dloss, D.trainable_variables)
        d_opt.apply_gradients(zip(gradient_d, D.trainable_variables))

        # Train the generator once for every x critic updates.
        x = 1
        if i % x == 0:
            noise = tf.random.normal([batch_size, noise_dim])
            with tf.GradientTape() as gt:
                Gloss = g_loss(noise, True)
            gradient_g = gt.gradient(Gloss, G.trainable_variables)
            g_opt.apply_gradients(zip(gradient_g, G.trainable_variables))

        # Uncomment to resume from previously saved weights.
        # if LOADED == 0:
        #     LOADED = 1
        #     G.load_weights('./save/myG.h5')
        #     D.load_weights('./save/myD.h5')

        if i % 20 == 0:
            generate_plot_image(i, epoch)
        if i % 80 == 0:
            print(i, "dloss:", Dloss, "Gloss:", Gloss, "w:", w)

    G.save_weights('./save/myG.h5')
    D.save_weights('./save/myD.h5')

    # Uncomment to keep periodic numbered snapshots as well.
    # if epoch % 6 == 5:
    #     G.save_weights('./save2/{:05d}myG.h5'.format(epoch))
    #     D.save_weights('./save2/{:05d}myD.h5'.format(epoch))
