import numpy
import numpy as np
import tensorflow as tf
import keras.datasets.fashion_mnist as fashion_mnist
import keras.datasets.mnist as mnist
from tensorflow.python.platform import gfile

tf.compat.v1.disable_eager_execution()

# Number of alternating discriminator/generator training iterations.
TRAINING_STEPS = 550
# Number of real images (and noise vectors) per mini-batch.
batch_size = 100


# Load and preprocess the MNIST training set.
def data_init():
    """Return the 60000 MNIST training images as a float32 array.

    Each image is flattened to a 784-dim row vector and scaled
    from [0, 255] into [0, 1].
    """
    (train_images, _), (_, _) = mnist.load_data()
    # Flatten 28x28 images, scale pixel values, then cast to float32.
    scaled = np.reshape(train_images, (60000, 784)) / 255.0
    return np.asarray(scaled, dtype='float32')


def getRandomIndex(n, x):
    """Draw x distinct random indices from the range [0, n).

    x is clamped to n so that sampling without replacement
    (replace=False) never asks for more items than exist.
    """
    count = min(x, n)
    return np.random.choice(np.arange(n), size=count, replace=False)


# Sample a random mini-batch from the full dataset.
def data_batch_set(input_data):
    """Return `batch_size` randomly chosen rows of input_data."""
    rows = getRandomIndex(len(input_data), batch_size)
    return input_data[rows]


# Linearly rescale a tensor into the interval [a, b].
def tf_normalize(tensor, a, b):
    """Min-max normalize `tensor` into [a, b].

    NOTE(review): divides by (max - min); a constant tensor would
    yield inf/nan here — confirm callers never pass one.
    """
    lo = tf.reduce_min(tensor)
    hi = tf.reduce_max(tensor)
    scale = (b - a) / (hi - lo)
    return a + scale * (tensor - lo)


# Linearly rescale a NumPy array into the interval [a, b].
def np_normalize(np_array, a, b):
    """Min-max normalize `np_array` into [a, b].

    Args:
        np_array: numeric NumPy array (any shape).
        a: lower bound of the target interval.
        b: upper bound of the target interval.

    Returns:
        Array of the same shape with values mapped so that the
        array minimum lands on `a` and the maximum on `b`.

    NOTE: a constant array gives max == min, so this divides by
    zero (inf/nan) — unchanged from the original behavior.
    """
    # Renamed locals: the originals shadowed the builtins max/min.
    arr_max = np.max(np_array)
    arr_min = np.min(np_array)
    return a + (((b - a) / (arr_max - arr_min)) * (np_array - arr_min))


# One fully connected layer: out = act(input @ W + b).
def add_layer(input, in_size, out_size, active_function=None):
    """Build a dense layer inside the current variable scope.

    Args:
        input: batch x in_size input tensor.
        in_size: number of input features (columns of `input`).
        out_size: number of output features.
        active_function: activation applied to the pre-activation;
            sigmoid is used when None is given.

    Returns:
        batch x out_size activated output tensor.
    """
    init = tf.compat.v1.truncated_normal_initializer(stddev=0.1)
    weights = tf.compat.v1.get_variable(
        'f_weighs', shape=[in_size, out_size], initializer=init)
    bias = tf.compat.v1.get_variable(
        'f_bais', shape=[1, out_size], initializer=init)
    # Pre-activation (broadcasts the 1 x out_size bias over the batch).
    pre_activation = tf.matmul(input, weights) + bias
    activation = tf.nn.sigmoid if active_function is None else active_function
    return activation(pre_activation)


# Generator forward pass: 128-d noise -> 784-d fake image.
def generator_inference(input_tensor, reuse_variables=False):
    """Map a batch x 128 noise tensor to batch x 784 fake images.

    Layer 1: 128 -> 256 with leaky ReLU.
    Layer 2: 256 -> 784; add_layer falls back to sigmoid, which
    keeps the pixel outputs in (0, 1).
    """
    with tf.name_scope("G"):
        with tf.compat.v1.variable_scope("generator_1", reuse=reuse_variables):
            hidden = add_layer(input_tensor, 128, 256, tf.nn.leaky_relu)
        with tf.compat.v1.variable_scope("generator_2", reuse=reuse_variables):
            fake_images = add_layer(hidden, 256, 28 * 28)
    return fake_images


# Discriminator forward pass: 784-d image -> real/fake probability.
def discriminator_inference(input_tensor, reuse_variables=False):
    """Map a batch x 784 image tensor to a batch x 1 probability.

    Layer 1: 784 -> 256 with leaky ReLU.
    Layer 2: 256 -> 1 with sigmoid, yielding P(image is real).
    """
    with tf.name_scope("D"):
        with tf.compat.v1.variable_scope("discriminator_1", reuse=reuse_variables):
            hidden = add_layer(input_tensor, 784, 256, tf.nn.leaky_relu)
        with tf.compat.v1.variable_scope("discriminator_2", reuse=reuse_variables):
            probability = add_layer(hidden, 256, 1, tf.nn.sigmoid)
    return probability


# Split trainable variables between generator and discriminator.
def get_trainable_variables():
    """Return (generator_vars, discriminator_vars), grouped by the
    scope-name markers used in the inference functions."""
    g_vars, d_vars = [], []
    for var in tf.compat.v1.trainable_variables():
        if 'generator_' in var.name:
            g_vars.append(var)
        if 'discriminator_' in var.name:
            d_vars.append(var)
    return g_vars, d_vars


# Standard-normal noise batch for the generator input.
def create_gaussian_noise(batch_size):
    """Return a (batch_size, 128) float32 array of N(0, 1) samples."""
    noise = np.random.normal(loc=0.0, scale=1.0, size=(batch_size, 128))
    return np.asarray(noise, dtype='float32')


# Build the GAN graph and run the alternating training loop.
def train():
    """Train the GAN on MNIST.

    Builds the discriminator/generator graph, then for TRAINING_STEPS
    iterations runs one SGD step on the discriminator followed by one
    on the generator, logging both losses every 20 steps.
    """
    global_step_d = tf.Variable(0, trainable=False)
    global_step_g = tf.Variable(0, trainable=False)

    # Discriminator input: real images, flattened to 784 features.
    image_x = tf.compat.v1.placeholder(tf.float32, [None, 784])
    # Generator input: 128-d Gaussian noise.
    noise_x = tf.compat.v1.placeholder(tf.float32, [None, 128])
    # D's probability that a real image is real.
    real_p = discriminator_inference(input_tensor=image_x)
    # Images produced by the generator.
    fake_images = generator_inference(input_tensor=noise_x)

    # D's probability that a generated image is real (shares D's weights).
    fake_p = discriminator_inference(fake_images, reuse_variables=True)

    # Standard minimax GAN losses.
    # NOTE(review): log(p) is unguarded — if D saturates to 0 or 1 this
    # yields -inf/NaN; consider clipping the probabilities. To confirm.
    d_loss = -1. * tf.reduce_mean(tf.compat.v1.log(real_p) + tf.compat.v1.log(1.0 - fake_p))
    g_loss = -1. * tf.reduce_mean(tf.compat.v1.log(fake_p))

    # Each optimizer only updates its own sub-network's variables.
    g_vars, d_vars = get_trainable_variables()
    train_step_d = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss=d_loss,
                                                                                             global_step=global_step_d,
                                                                                             var_list=d_vars)
    train_step_g = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss=g_loss,
                                                                                             global_step=global_step_g,
                                                                                             var_list=g_vars)
    # Initialize the session and start training.
    init_var = tf.compat.v1.global_variables_initializer()
    with tf.compat.v1.Session() as sess:
        sess.run(init_var)
        # Write the graph for TensorBoard inspection.
        _ = tf.compat.v1.summary.FileWriter('tensorboard/', sess.graph)
        # Real training images, preloaded once.
        real_images = data_init()
        # Alternate one D step and one G step per iteration.
        for i in range(TRAINING_STEPS):

            train_batch_images = data_batch_set(real_images)
            train_batch_noise = create_gaussian_noise(batch_size)
            _, d_loss_value, real_p_value, fake_p_value = sess.run([train_step_d, d_loss, real_p, fake_p],
                                                                   feed_dict={image_x: train_batch_images,
                                                                              noise_x: train_batch_noise})
            _, g_loss_value = sess.run([train_step_g, g_loss],
                                       feed_dict={noise_x: train_batch_noise})

            if i % 20 == 0:
                # Bug fix: the step number was previously printed as
                # i * 100, overstating progress 100x; i is the real step.
                print("step(%d)\nD_loss=%f\nG_loss=%f\n" % (i, d_loss_value, g_loss_value))


def main():
    # Entry point: build the GAN graph and run the full training session.
    train()


if __name__ == '__main__':
    main()
