from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.examples.tutorials.mnist.input_data as input_data
import os
from scipy import stats

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd

from tools import plot
from tools import math as tool_math
from tools import csv_process

# Convenience aliases into the project-local `tools` helpers.
show_images = plot.show_images
deprocess_img = plot.deprocess_img
correlation = tool_math.correlation

# Fix the graph-level random seed for reproducibility (TF 1.x API).
tf.set_random_seed(2017)

plt.rcParams['figure.figsize'] = (10.0, 8.0)  # default plot size
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

# NOTE(review): loaded but never used below; directory is spelled
# 'MINST_data' (not 'MNIST') — confirm intentional before fixing.
mnist = input_data.read_data_sets('MINST_data')
csvPath = r'E:\教学任务\基于gan的课题研究\工程\TensorFlowLearning-master\gan\data\2.csv'
one_size = 16  # number of features (columns) per sample read from the CSV
# presumably a 2-D array of shape (num_samples, one_size) — verify against tools.csv_process
train_set = csv_process.csv_process(csvPath, one_size)
print('==============', train_set.shape[0])
N_GPUS = 1
# Lambda hyper-parameter for the gradient penalty term (WGAN-GP style).
LAMBDA = 10
DEVICES = ['/gpu:{}'.format(i) for i in range(N_GPUS)]
# input_ph is the input placeholder; with a large dataset, each sess.run can
# feed just a slice of it, e.g.
#   sess.run([...], feed_dict={input_ph: batch_of_rows})
# The leading dimension is None so any batch size can be supplied via feed_dict.
input_ph = tf.placeholder(tf.float32, shape=[None, one_size], name="inputdata")
# Rescale inputs from [0, 1] to [-1, 1]
# (assumes the CSV data is normalized to [0, 1] — TODO confirm).
inputs = tf.divide(input_ph - 0.5, 0.5)


def conv1d(x, W):
    """1-D convolution of `x` with filter bank `W`, stride 2, SAME padding.

    Bug fix: ``tf.nn.conv1d`` has no ``kennel_size`` keyword (the kernel
    width is taken from ``W``'s shape), so the original call raised a
    TypeError.  Only the stride and padding are passed now.
    """
    return tf.nn.conv1d(x, W, stride=2, padding='SAME')


def encoder(data, scope="encoder", reuse=None):
    """Encode a (batch, one_size) tensor into an 8-dim code in (-1, 1).

    Five ReLU fully-connected layers (widths 8, 128, 64, 32, 16) followed
    by an 8-unit tanh output layer; scope names fc1..fc6 are preserved so
    variable reuse across call sites keeps working.
    """
    hidden_widths = [8, 128, 64, 32, 16]
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu):
            net = data
            for layer_idx, width in enumerate(hidden_widths, start=1):
                net = slim.fully_connected(net, width, scope='fc{}'.format(layer_idx))
            return slim.fully_connected(net, 8, activation_fn=tf.nn.tanh, scope='fc6')


# kernel = np.array(np.arange(1, 1 + 4), dtype=np.float32))

def generator(noise, scope='generator', reuse=None):
    """Map latent noise to a (batch, one_size) sample in (0, 1).

    Four ReLU fully-connected layers (widths 256, 128, 64, 32) followed by
    a sigmoid output layer of width `one_size`; scope names fc1..fc5 are
    preserved so variable reuse across call sites keeps working.
    """
    hidden_widths = [256, 128, 64, 32]
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu):
            net = noise
            for layer_idx, width in enumerate(hidden_widths, start=1):
                net = slim.fully_connected(net, width, scope='fc{}'.format(layer_idx))
            return slim.fully_connected(net, one_size, activation_fn=tf.nn.sigmoid, scope='fc5')


# batch_size is a tensor, not a Python int: input_ph is only a placeholder,
# so the actual number of samples per step is known only once data is fed.
batch_size = tf.shape(input_ph)[0]

noise_dim = 8  # dimensionality of the generator's latent noise
# (batch_size x noise_dim) matrix, uniform in (-1, 1)
sample_noise = tf.random_uniform([batch_size, noise_dim], dtype=tf.float32, minval=-1.0, maxval=1.0,
                                 name='sample_noise')
print(sample_noise)
# Real inputs, rescaled above; shape (batch_size, one_size).
input_real = inputs
inputs_fake = generator(sample_noise)
# Gradient-blocked copy of the fake batch, for loss terms that must not
# backpropagate into the generator.
inputs_fake_ng = tf.stop_gradient(inputs_fake)
# Encoder codes for real / fake / gradient-blocked fake batches.
# AUTO_REUSE makes all three calls share one set of encoder weights.
z_real = encoder(inputs, reuse=tf.AUTO_REUSE)
z_fake = encoder(inputs_fake, reuse=tf.AUTO_REUSE)
z_fake_ng = encoder(inputs_fake_ng, reuse=tf.AUTO_REUSE)
# Collapse each 8-dim code to a scalar score by averaging: shape (batch_size, 1).
logits_real = tf.reduce_mean(z_real, 1, keepdims=True, name='z_real_mean')
print("logits_real")
print(logits_real)
logits_fake = tf.reduce_mean(z_fake, 1, keepdims=True, name='z_fake_mean')
logits_fake_ng = tf.reduce_mean(z_fake_ng, 1, keepdims=True, name='z_fake_ng_mean')
t1_loss = logits_real - logits_fake_ng
t2_loss = logits_fake - logits_fake_ng
# Correlation between the input noise and the code of the generated sample
# (semantics defined by tools.math.correlation — see that module).
z_corr = correlation(sample_noise, z_fake)
z_corr_mean = tf.reduce_mean(z_corr)
print(z_fake, sample_noise)
# Quadratic-potential-style regularizer: squared real/fake score gap scaled
# by the mean squared distance between real and (gradient-blocked) fake data.
qp_loss = 0.25 * t1_loss[:, 0] ** 2 / tf.reduce_mean((input_real - inputs_fake_ng) ** 2)
# WGAN-GP gradient penalty on random interpolates between real and fake.
differences = inputs_fake - input_real
alpha = tf.random_uniform(shape=[batch_size, 1], minval=0, maxval=1, dtype=tf.float32)

interpolates = input_real + (alpha * differences)
z_interpolates = encoder(interpolates, reuse=tf.AUTO_REUSE)
interpolates_d = tf.reduce_mean(z_interpolates, 1, keepdims=True, name='d_interpolates')
gradients = tf.gradients(interpolates_d, [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
# NOTE(review): LAMBDA (=10) is defined above but the penalty is weighted
# 1.0 here — confirm which weight is intended.
loss = tf.reduce_mean(t1_loss + t2_loss - 0.5 * z_corr) + tf.reduce_mean(qp_loss) + 1.0 * gradient_penalty

# Build the optimizer.  No var_list is given, so this single RMSProp step
# updates encoder and generator variables jointly.
# NOTE(review): typical GAN training alternates separate optimizers with
# explicit var_list per network — confirm the joint update is intended.
opt = tf.train.RMSPropOptimizer(1e-4, 0.99)
train_loss = opt.minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# print(inputs_fake)
# print(input_ph)

# Training loop: feed contiguous mini-batches of real samples and run one
# joint optimizer step per batch.
#
# Bug fixes vs. the original loop:
#  * `train_set[[a, b]]` fancy-indexed exactly two rows instead of slicing a
#    batch — replaced with a contiguous slice train_set[start:end].
#  * `num_examples += (num_batch + 1) * batch` over-counted quadratically,
#    ending each epoch early — now advanced by the true batch size.
#  * The final-batch branch indexed row train_set.shape[0], one past the end
#    — the last slice is now clamped to the dataset length instead.
iter_count = 0
show_every = 100      # record/log metrics every `show_every` optimizer steps
list_corr = []        # history of the noise/code correlation metric
list_loss = []        # history of the total loss
batch = 2             # mini-batch size (rows of train_set per step)
fake_times = None     # last generated batch; exported to CSV after training
# Up to 220 full passes over the training set.
for e in range(220):
    num_examples = 0
    # One epoch: walk the dataset in contiguous mini-batches.
    while num_examples < train_set.shape[0]:
        start = num_examples
        # Clamp the final batch so we never index past the end of the set.
        end = min(start + batch, train_set.shape[0])
        train_times = train_set[start:end]
        num_examples = end

        sess.run(train_loss, feed_dict={input_ph: train_times})

        if iter_count % show_every == 0:
            z_p, z_loss = sess.run([z_corr_mean, loss], feed_dict={input_ph: train_times})
            list_corr.append(z_p)
            list_loss.append(z_loss)
            print('Iter: {},corr_loss: {:.4f},loss: {:.4f}'.format(iter_count, z_p, z_loss))
            # Sample the generator (batch size follows the fed batch) so the
            # most recent fakes are available for the CSV export below.
            fake_times = sess.run(inputs_fake, feed_dict={input_ph: train_times})

        iter_count += 1

# Export the last generated batch and plot the recorded training curves.
data = pd.DataFrame(fake_times)
data.to_csv("data\data.csv")
len_corr = len(list_corr)
x = np.array(range(0, len_corr))
# linewidth takes a number, not the string '1' as in the original.
plt.plot(x, list_corr, linewidth=1, ls='solid', c='red', label='noise corr')
plt.plot(x, list_loss, linewidth=1, ls='solid', c='black', label='noise loss')
plt.legend()
plt.show()
saver = tf.train.Saver()  # checkpoint saver for all graph variables
# Save under <script dir>/model/ogan.ckpt; os.path.join keeps it portable
# instead of hard-coded Windows '\\' separators.
saver.save(sess, os.path.join(os.path.abspath(os.path.dirname(__file__)), "model", "ogan.ckpt"))
# Write the graph definition for TensorBoard next to the project root.
tf.summary.FileWriter(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'graph'), sess.graph)
