import os
import tensorflow as tf
import numpy as np
from scipy.misc.pilutil import *
import shutil
from cn.redguest.pbase.model.Dense import Dense
from cn.redguest.pbase.model.Processor import *
from tensorflow.examples.tutorials.mnist import input_data

# Download/load MNIST into MNIST_data/ with one-hot labels.
# NOTE(review): the dataset's values appear unused below (train() reads
# fixed JPEGs instead) -- confirm whether this load is still needed.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

learning_v = 0.001  # NOTE(review): defined but not referenced in the visible code

# Image geometry: 28x28 grayscale, flattened to img_size pixels.
img_height = 28
img_width = 28
img_size = img_height * img_width

to_train = True  # NOTE(review): unused in the visible code -- possibly read elsewhere
output_path = "output"  # directory where per-epoch sample grids are written

max_epoch = 500  # number of outer training epochs

# MLP layer widths, latent dimensionality, and batch size.
h1_size = 150
h2_size = 300
z_size = 100
batch_size = 3  # matches the three fixed images loaded in train()


def build_generator(z_prior):
    """Build the generator network.

    Maps a latent batch ``z_prior`` of shape (batch, z_size) through two
    ReLU hidden layers to a tanh output of ``img_size`` values per row.

    Returns:
        A pair ``(output, params)`` where ``output`` is the generated
        image tensor and ``params`` lists the trainable weights/biases
        in layer order (W1, B1, W2, B2, W3, B3).
    """
    hidden1 = Dense(z_prior, z_size, h1_size, tf.nn.relu)
    hidden2 = Dense(hidden1.y, h1_size, h2_size, tf.nn.relu)
    output = Dense(hidden2.y, h2_size, img_size, tf.nn.tanh)

    params = []
    for layer in (hidden1, hidden2, output):
        params.extend([layer.Weights, layer.Biases])
    return output.y, params


def build_discriminator(x_data, x_generated, keep_prob):
    """Build the discriminator network.

    Real and generated batches are stacked along axis 0 so a single
    forward pass scores both; the final activations are then sliced
    back apart (first ``batch_size`` rows are the real data).

    Returns:
        ``(y_data, y_generated, params)`` -- sigmoid scores for the real
        and generated halves, and the trainable weights/biases in layer
        order (W1, B1, W2, B2, W3, B3).
    """
    stacked = tf.concat([x_data, x_generated], 0)

    hidden1 = Dense(stacked, img_size, h2_size, tf.nn.relu)
    dropped1 = Dropout(hidden1.y, keep_prob)
    hidden2 = Dense(dropped1.y, h2_size, h1_size, tf.nn.relu)
    dropped2 = Dropout(hidden2.y, keep_prob)
    output = Dense(dropped2.y, h1_size, 1)

    # Split the stacked scores back into real vs. generated halves.
    y_data = tf.nn.sigmoid(tf.slice(output.y, [0, 0], [batch_size, -1], name=None))
    y_generated = tf.nn.sigmoid(tf.slice(output.y, [batch_size, 0], [-1, -1], name=None))

    params = []
    for layer in (hidden1, hidden2, output):
        params.extend([layer.Weights, layer.Biases])
    return y_data, y_generated, params


def train():
    """Train the GAN with alternating discriminator/generator updates.

    The "real" batch is a fixed set of three hand-provided 28x28 JPEGs
    (mn/01.jpg .. mn/03.jpg); each epoch writes a sample grid generated
    from a fixed latent vector and one from a fresh random vector into
    ``output_path``.
    """
    x_data = tf.placeholder(tf.float32, [batch_size, img_size], name="x_data")
    z_prior = tf.placeholder(tf.float32, [batch_size, z_size], name="z_prior")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")

    x_generated, g_params = build_generator(z_prior)
    y_data, y_generated, d_params = build_discriminator(x_data, x_generated, keep_prob)

    # Standard non-saturating GAN losses, reduced to scalar batch means
    # (the originals were per-sample tensors; minimizing the mean gives a
    # well-defined scalar objective).
    d_loss = -tf.reduce_mean(tf.log(y_data) + tf.log(1 - y_generated))
    g_loss = -tf.reduce_mean(tf.log(y_generated))

    optimizer = tf.train.AdamOptimizer(0.0001)
    d_trainer = optimizer.minimize(d_loss, var_list=d_params)
    g_trainer = optimizer.minimize(g_loss, var_list=g_params)

    # tf.initialize_all_variables() is deprecated; use the TF >= 0.12 API.
    init = tf.global_variables_initializer()

    saver = tf.train.Saver()  # NOTE(review): never used to save -- kept for future checkpointing

    # Start with a clean output directory for this run's sample images.
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
    os.mkdir(output_path)

    # The real-data batch is invariant across iterations, so load it once
    # instead of re-reading the JPEGs 500 times per epoch.
    x_value = np.concatenate([
        imread("mn/%02d.jpg" % k, mode="L").reshape([1, img_height * img_width])
        for k in (1, 2, 3)
    ])
    # NOTE(review): dividing uint8 pixels by 300 yields roughly [0, 0.85],
    # while the generator's tanh output lies in [-1, 1] -- confirm that
    # this asymmetric scaling is intended.
    x_value = x_value / 300

    # Fixed latent sample so "sample%s.jpg" tracks the same z across epochs.
    z_sample_val = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)

    with tf.Session() as sess:
        sess.run(init)

        merged = tf.summary.merge_all()  # merge summary ops (currently none are defined)
        writer = tf.summary.FileWriter('logs', sess.graph, flush_secs=10)  # write the graph/logs under logs/

        for i in range(max_epoch):
            for j in range(500):
                print("epoch:%s, iter:%s" % (i, j))
                z_value = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)
                feed = {x_data: x_value, z_prior: z_value, keep_prob: 0.7}
                # One discriminator step followed by one generator step
                # per iteration (the original `j % 1 == 0` gate was a no-op).
                sess.run(d_trainer, feed_dict=feed)
                sess.run(g_trainer, feed_dict=feed)

            # Per-epoch snapshots: one from the fixed z, one from a fresh z.
            x_gen_val = sess.run(x_generated, feed_dict={z_prior: z_sample_val})
            show_result(x_gen_val, os.path.join(output_path, "sample%s.jpg" % i))
            z_random_sample_val = np.random.normal(0, 1, size=(batch_size, z_size)).astype(np.float32)
            x_gen_val = sess.run(x_generated, feed_dict={z_prior: z_random_sample_val})
            show_result(x_gen_val, os.path.join(output_path, "random_sample%s.jpg" % i))


def show_result(batch_res, fname, grid_size=(8, 8), grid_pad=5):
    batch_res = 0.5 * batch_res.reshape((batch_res.shape[0], img_height, img_width)) + 0.5
    img_h, img_w = batch_res.shape[1], batch_res.shape[2]
    grid_h = img_h * grid_size[0] + grid_pad * (grid_size[0] - 1)
    grid_w = img_w * grid_size[1] + grid_pad * (grid_size[1] - 1)
    img_grid = np.zeros((grid_h, grid_w), dtype=np.uint8)
    for i, res in enumerate(batch_res):
        if i >= grid_size[0] * grid_size[1]:
            break
        img = res * 255
        img = img.astype(np.uint8)
        row = (i // grid_size[0]) * (img_h + grid_pad)
        col = (i % grid_size[1]) * (img_w + grid_pad)
        img_grid[row:row + img_h, col:col + img_w] = img
    imsave(fname, img_grid)


if __name__ == "__main__":
    train()
