import os
import time
import itertools
import imageio
import pickle
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import scipy.misc
import data_input
import shutil
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True



def lrelu(x, th=0.2):
    """Leaky ReLU: identity for positive inputs, slope `th` for negatives."""
    leaked = th * x
    return tf.maximum(leaked, x)

# G(z)
# def generator(x, isTrain=True, reuse=False):
    # with tf.variable_scope('generator', reuse=reuse):

    # # 1st hidden layer
    # conv1 = tf.layers.conv2d_transpose(x, 1024, [4, 4], strides=(1, 1), padding='valid')
    # lrelu1 = lrelu(tf.layers.batch_normalization(conv1, training=isTrain), 0.2)

    # # 2nd hidden layer
    # conv2 = tf.layers.conv2d_transpose(lrelu1, 512, [4, 4], strides=(2, 2), padding='same')
    # lrelu2 = lrelu(tf.layers.batch_normalization(conv2, training=isTrain), 0.2)

    # # 3rd hidden layer
    # conv3 = tf.layers.conv2d_transpose(lrelu2, 256, [4, 4], strides=(2, 2), padding='same')
    # lrelu3 = lrelu(tf.layers.batch_normalization(conv3, training=isTrain), 0.2)

    # # 4th hidden layer
    # conv4 = tf.layers.conv2d_transpose(lrelu3, 128, [4, 4], strides=(2, 2), padding='same')
    # lrelu4 = lrelu(tf.layers.batch_normalization(conv4, training=isTrain), 0.2)

    # # output layer
    # conv5 = tf.layers.conv2d_transpose(lrelu4, 3, [4, 4], strides=(2, 2), padding='same')
    # o = tf.nn.tanh(conv5)

    # return o


def generator(x, isTrain=True, reuse=False):
    """Build the DCGAN-style generator graph.

    Six transposed convolutions each upsample 2x, with batch norm + ReLU
    between stages and a tanh output (pixels in [-1, 1]).

    Args:
        x: latent input tensor (the script feeds shape (N, 1, 1, 100)).
        isTrain: bool tensor/flag controlling batch-norm train/inference mode.
        reuse: reuse the 'generator' variable scope when True.

    Returns:
        The tanh-activated output tensor.
    """
    with tf.variable_scope('generator', reuse=reuse):
        # (filters, padding) per upsampling stage; the first stage uses
        # 'valid' padding to expand the 1x1 latent map.
        stages = [(1024, 'valid'), (512, 'same'), (256, 'same'),
                  (128, 'same'), (64, 'same')]
        convs = []
        net = x
        for filters, pad in stages:
            conv = tf.layers.conv2d_transpose(
                net, filters, [4, 4], strides=(2, 2), padding=pad)
            convs.append(conv)
            net = tf.nn.relu(
                tf.layers.batch_normalization(conv, training=isTrain))

        # output layer: 3-channel image, no batch norm, tanh activation
        out_conv = tf.layers.conv2d_transpose(
            net, 3, [4, 4], strides=(2, 2), padding='same')
        convs.append(out_conv)
        o = tf.nn.tanh(out_conv)

        # Debug trace of the input and per-layer output shapes.
        print(x.shape)
        for conv in convs:
            print(conv.shape)
        print("----Generator----")
        return o

# D(x)
# def discriminator(x, isTrain=True, reuse=False):
    # with tf.variable_scope('discriminator', reuse=reuse):
        # # 1st hidden layer
        # conv1 = tf.layers.conv2d(x, 128, [4, 4], strides=(2, 2), padding='same')
        # lrelu1 = lrelu(conv1, 0.2)

        # # 2nd hidden layer
        # conv2 = tf.layers.conv2d(lrelu1, 256, [4, 4], strides=(2, 2), padding='same')
        # lrelu2 = lrelu(tf.layers.batch_normalization(conv2, training=isTrain), 0.2)

        # # 3rd hidden layer
        # conv3 = tf.layers.conv2d(lrelu2, 512, [4, 4], strides=(2, 2), padding='same')
        # lrelu3 = lrelu(tf.layers.batch_normalization(conv3, training=isTrain), 0.2)

        # # 4th hidden layer
        # conv4 = tf.layers.conv2d(lrelu3, 1024, [4, 4], strides=(2, 2), padding='same')
        # lrelu4 = lrelu(tf.layers.batch_normalization(conv4, training=isTrain), 0.2)

        # # output layer
        # conv5 = tf.layers.conv2d(lrelu4, 1, [4, 4], strides=(1, 1), padding='valid')
        # o = tf.nn.sigmoid(conv5)
        # print(x.shape)
        # print(conv1.shape)
        # print(conv2.shape)
        # print(conv3.shape)
        # print(conv4.shape)
        # print(conv5.shape)

        # return o, conv5


def discriminator(x, isTrain=True, reuse=False):
    """Build the WGAN critic graph.

    Five strided convolutions (LeakyReLU 0.2; batch norm on all but the
    first) followed by a 1-channel 'valid' convolution producing the raw
    critic score — no sigmoid, since the WGAN loss consumes logits.

    Args:
        x: image batch tensor (the script feeds (N, 128, 128, 3)).
        isTrain: bool tensor/flag controlling batch-norm train/inference mode.
        reuse: reuse the 'discriminator' variable scope when True.

    Returns:
        (o, logits) — both are the same raw score tensor; kept as a pair
        for interface compatibility with the earlier sigmoid variant.
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        print(x.shape)

        # 1st hidden layer: no batch norm on the critic's input layer.
        first = tf.layers.conv2d(
            x, 1024, [4, 4], strides=(2, 2), padding='same')
        print(first.shape)
        net = lrelu(first, 0.2)

        # Remaining hidden layers: conv -> batch norm -> LeakyReLU.
        for filters in (512, 256, 128, 64):
            conv = tf.layers.conv2d(
                net, filters, [4, 4], strides=(2, 2), padding='same')
            net = lrelu(tf.layers.batch_normalization(
                conv, training=isTrain), 0.2)
            print(conv.shape)

        # output layer: raw score, no activation
        logits = tf.layers.conv2d(
            net, 1, [4, 4], strides=(2, 2), padding='valid')
        print(logits.shape)
        o = logits

        return o, logits


# Fixed latent batch (25 standard-normal vectors of length 100) reused for visualization.
fixed_z_ = np.random.normal(loc=0, scale=1, size=(25, 1, 1, 100))


def show_result(num_epoch, show=False, save=False, path='result.png'):
    """Render a 5x5 grid of generator samples for the fixed latent batch.

    Relies on module-level globals: sess, G_z, z, isTrain, fixed_z_
    (25 latent vectors — exactly filling the 5x5 grid).

    Args:
        num_epoch: epoch number used as the figure caption.
        show: display the figure interactively.
        save: write the figure to `path`.
        path: output image path when saving.
    """
    test_images = sess.run(G_z, {z: fixed_z_, isTrain: False})

    size_figure_grid = 5
    fig, ax = plt.subplots(size_figure_grid, size_figure_grid, figsize=(5, 5))
    for i, j in itertools.product(range(size_figure_grid), range(size_figure_grid)):
        ax[i, j].get_xaxis().set_visible(False)
        ax[i, j].get_yaxis().set_visible(False)

    for k in range(size_figure_grid*size_figure_grid):
        i = k // size_figure_grid
        j = k % size_figure_grid
        ax[i, j].cla()
        # The generator emits 128x128 RGB in [-1, 1] (tanh output); rescale
        # to [0, 1] for imshow.  The previous reshape to (64, 64) grayscale
        # crashed: 128*128*3 values cannot reshape to (64, 64).
        img = (np.reshape(test_images[k], (128, 128, 3)) + 1.0) / 2.0
        ax[i, j].imshow(img)

    label = 'Epoch {0}'.format(num_epoch)
    fig.text(0.5, 0.04, label, ha='center')

    if save:
        plt.savefig(path)

    if show:
        plt.show()
    else:
        plt.close()

# def show_train_hist(hist, show = False, save = False, path = 'Train_hist.png'):
#     x = range(len(hist['D_losses']))

#     y1 = hist['D_losses']
#     y2 = hist['G_losses']

# #    plt.plot(x, y1, label='D_loss')
# #    plt.plot(x, y2, label='G_loss')

# #    plt.xlabel('Epoch')
# #    plt.ylabel('Loss')

# #    plt.legend(loc=4)
# #    plt.grid(True)
# #    plt.tight_layout()

#     if save:
#         plt.savefig(path)


# training parameters
batch_size = 16
lr = 0.0002  # base learning rate for both RMSProp optimizers below
train_epoch = 300
total_img_num = 512  # images consumed per epoch (total_img_num // batch_size iterations)

# load MNIST
#mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=[])

# load image
# NOTE(review): the directory name suggests 256x256 sources, but the graph
# consumes 128x128x3 batches (see the `x` placeholder below) — presumably
# data_input resizes; verify against data_input.py.
data_dir = "./train_dir/256_256/"

print("base information : ")
print('------batch_size : %d' % batch_size)
print('------train_epoch : %d' % train_epoch)
print('------total_img_num : %d' % total_img_num)
print('------data_dir : ' + data_dir)

# variables : input
x = tf.placeholder(tf.float32, shape=(None, 128, 128, 3))  # real image batches
z = tf.placeholder(tf.float32, shape=(None, 1, 1, 100))  # latent noise vectors
isTrain = tf.placeholder(dtype=tf.bool)  # toggles batch-norm train/inference mode

# networks : generator
G_z = generator(z, isTrain)

# networks : discriminator
# The critic is instantiated twice over shared variables: once on real
# images and once on generated ones (reuse=True).
D_real, D_real_logits = discriminator(x, isTrain)
D_fake, D_fake_logits = discriminator(G_z, isTrain, reuse=True)

# loss for each network
# WGAN objective: the critic maximizes E[D(real)] - E[D(fake)], written
# here as minimizing -E[D(real)] + E[D(fake)].
D_loss_real = tf.reduce_mean(tf.scalar_mul(-1, D_real_logits))
D_loss_fake = tf.reduce_mean(D_fake_logits)
# D_loss_real = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=D_real_logits, labels=tf.ones([batch_size, 1, 1, 1])))
# D_loss_fake = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.zeros([batch_size, 1, 1, 1])))
D_loss = D_loss_real + D_loss_fake
# The generator minimizes -E[D(fake)], pushing critic scores on fakes up.
G_loss = tf.reduce_mean(tf.scalar_mul(-1, D_fake_logits))
# G_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=D_fake_logits, labels=tf.ones([batch_size, 1, 1, 1])))


# trainable variables for each network
T_vars = tf.trainable_variables()
D_vars = [var for var in T_vars if var.name.startswith('discriminator')]
G_vars = [var for var in T_vars if var.name.startswith('generator')]

# WGAN weight-clipping bounds for the critic's variables (a crude way to
# enforce the Lipschitz constraint the Wasserstein loss requires).
CLIP = [-0.01, 0.01]

# optimizer for each network
# The control dependency on UPDATE_OPS makes batch-norm moving statistics
# update alongside every training step.
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
    # D_optim = tf.train.GradientDescentOptimizer(lr * 1).minimize(D_loss, var_list=D_vars)
    # G_optim = tf.train.GradientDescentOptimizer(lr * 5).minimize(G_loss, var_list=G_vars)
    # D_optim = tf.train.AdamOptimizer(lr * 1, beta1=0.5).minimize(D_loss, var_list=D_vars)
    # G_optim = tf.train.AdamOptimizer(lr * 1, beta1=0.5).minimize(G_loss, var_list=G_vars)
    D_optim = tf.train.RMSPropOptimizer(
        lr * 1).minimize(D_loss, var_list=D_vars)
    G_optim = tf.train.RMSPropOptimizer(
        lr * 1).minimize(G_loss, var_list=G_vars)
    # Ops that clamp every critic variable into [CLIP[0], CLIP[1]]; run
    # after each critic update in the training loop.
    clip_d_op = [var.assign(tf.clip_by_value(var, CLIP[0], CLIP[1]))
                 for var in D_vars]
    #clip_d_op = D_optim
    #clip_d_op = D_optim


# open session and initialize all variables
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
# gpu_options = tf.GPUOptions(gpu_options=tf.GPUOptions(allow_growth=True))
config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))

# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# sess = tf.InteractiveSession()
# tf.global_variables_initializer().run()

# MNIST resize and normalization
# train_set = tf.image.resize_images(mnist.train.images, [64, 64]).eval()
# train_set = (train_set - 0.5) / 0.5  # normalization; range: -1 ~ 1

# image processs
train_set = data_input.data_input(
    data_dir=data_dir, batch_size=batch_size, shuffle=True)

# results save folder
# Recreate ./result from scratch each run.  The original only called
# os.mkdir inside the `if exists` branch, so on a fresh run (no ./result
# yet) the directory was never created and the image saves below failed.
if os.path.exists('./result'):
    shutil.rmtree('./result')
os.mkdir('./result')

# Per-run bookkeeping: loss curves and timing, filled during training.
train_hist = {}
train_hist['D_losses'] = []
train_hist['G_losses'] = []
train_hist['per_epoch_ptimes'] = []
train_hist['total_ptime'] = []


# training-loop
np.random.seed(int(time.time()))
print('training start!')
start_time = time.time()
with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))) as sess:
    tf.global_variables_initializer().run()
    for epoch in range(train_epoch):
        G_losses = []
        D_losses = []
        epoch_start_time = time.time()
        train_set = np.random.permutation(train_set)
        for it in range(total_img_num // batch_size):
            # update discriminator (critic)
            x_ = train_set[it*batch_size:(it+1)*batch_size]
            x_ = np.reshape(x_, [batch_size, 128, 128, 3])
            # WGAN heuristic: train the critic extra hard at the start and
            # periodically, so its Wasserstein estimate stays accurate.
            if it < 25 or it % 50 == 0:
                trickNum = 25
            else:
                trickNum = 5
            for _ in range(trickNum):
                z_ = np.random.uniform(-1, 1, (batch_size, 1, 1, 100))
                # Run the critic step and the weight-clipping ops together.
                loss_d_, _, __ = sess.run([D_loss, D_optim, clip_d_op], {
                    x: x_, z: z_, isTrain: True})
                D_losses.append(loss_d_)

            # update generator
            z_ = np.random.uniform(-1, 1, (batch_size, 1, 1, 100))
            loss_g_, _ = sess.run([G_loss, G_optim], {
                                  z: z_, x: x_, isTrain: True})
            G_losses.append(loss_g_)
            if it % 2 == 0:
                print('------[%d]time : loss_d: %.6f, loss_g: %.6f' %
                      (it, np.mean(D_losses), np.mean(G_losses)))
        epoch_end_time = time.time()
        per_epoch_ptime = epoch_end_time - epoch_start_time
        print('[%d/%d] - ptime: %.2f loss_d: %.6f, loss_g: %.6f' % ((epoch + 1),
                                                                    train_epoch, per_epoch_ptime, np.mean(D_losses), np.mean(G_losses)))
        train_hist['D_losses'].append(np.mean(D_losses))
        train_hist['G_losses'].append(np.mean(G_losses))
        train_hist['per_epoch_ptimes'].append(per_epoch_ptime)
        # Sample and save preview images every other epoch (and at the end).
        # The save loop now lives INSIDE this branch: the original ran it on
        # every epoch, re-saving the previous even epoch's stale samples
        # under the new epoch's filenames.
        if epoch % 2 == 0 or epoch + 1 == train_epoch:
            # NOTE(review): this rebinds the module-level fixed_z_ (and with
            # 10 samples, not 25), so show_result's 5x5 grid would break if
            # called after this point — confirm intent.
            fixed_z_ = np.random.uniform(-1, 1, (10, 1, 1, 100))
            test_images = sess.run(G_z, {z: fixed_z_, isTrain: False})
            for i in range(3):
                # NOTE(review): scipy.misc.imsave was removed in scipy >= 1.2;
                # switch to imageio.imwrite if scipy is ever upgraded.
                scipy.misc.imsave('./result/' + str(epoch) +
                                  '_' + str(i) + '.png',
                                  np.reshape((test_images[i] + 1.)/2., (128, 128, 3)))


# Record total wall-clock training time and report a summary.
end_time = time.time()
total_ptime = end_time - start_time
train_hist['total_ptime'].append(total_ptime)

avg_epoch_ptime = np.mean(train_hist['per_epoch_ptimes'])
print('Avg per epoch ptime: %.2f, total %d epochs ptime: %.2f' %
      (avg_epoch_ptime, train_epoch, total_ptime))
print("Training finish!... save training results")

def pre_train():
    """Persist the training history pickle and loss plot.

    NOTE(review): this function is never called in this file and it
    references `root`, `model`, and `show_train_hist`, none of which are
    defined here (they belong to the commented-out MNIST code above) —
    invoking it would raise NameError.  The original body also lacked the
    indent under `with`, which was a SyntaxError that prevented the whole
    file from even parsing; that is fixed here.
    """
    with open(root + model + 'train_hist.pkl', 'wb') as f:
        pickle.dump(train_hist, f)

    show_train_hist(train_hist, save=True, path=root + model + 'train_hist.png')

#images = []
# for e in range(train_epoch):
#    img_name = root + 'Fixed_results/' + model + str(e + 1) + '.png'
#    images.append(imageio.imread(img_name))
#imageio.mimsave(root + model + 'generation_animation.gif', images, fps=5)

# NOTE(review): redundant — the training session was opened in a `with`
# block, which already closed it on exit; a second close is harmless.
sess.close()