from __future__ import print_function

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

from keras.layers import Dense, Input
from keras.layers import Conv2D, Flatten, Lambda
from keras.layers import Reshape, Conv2DTranspose
from keras.models import Model
from keras.utils import to_categorical
from keras import backend as K


# Load the MNIST dataset from a local .npz archive.
# FIX: the original path '.\mnist.npz' relied on a Windows-style backslash
# (it only worked because '\m' is not a recognized escape); a plain relative
# filename resolves to the same file and is portable. np.load on an .npz
# returns an NpzFile that supports the context-manager protocol, so use
# `with` instead of a manual close().
with np.load('mnist.npz') as f:
    x_train, y_train = f['x_train'], f['y_train']
    x_test, y_test = f['x_test'], f['y_test']

# Reshape to (N, 28, 28, 1) single-channel images and scale pixels to [0, 1].
image_size = x_train.shape[1]
x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
x_test = np.reshape(x_test, [-1, image_size, image_size, 1])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255


# Network hyper-parameters.
input_shape = (image_size, image_size, 1)
batch_size = 100
kernel_size = 3
filters = 8
latent_dim = 20  # latent dimensionality (NOTE: original comment said 2-D "for plotting", but the value is 20 — the 2-D scatter code further down is commented out accordingly)
epochs = 100
num_classes = 10

# One-hot encode the labels; the class label is a second model input that is
# mapped through a single Dense layer to a learned per-class latent mean.
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
y = Input(shape=(num_classes,))  # class-label input (one-hot vector)
yh = Dense(latent_dim)(y)  # learned mean vector in latent space for each class

# Encoder: two strided conv layers downsample the image, then a small dense
# bottleneck produces the parameters of q(z|x).
x_in = Input(shape=input_shape)
x = x_in

for i in range(2):
    filters *= 2  # 8 -> 16 -> 32
    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               activation='relu',
               strides=2,
               padding='same')(x)

# Remember the conv output shape — the decoder needs it to undo the Flatten.
shape = K.int_shape(x)

x = Flatten()(x)
x = Dense(16, activation='relu')(x)
# Mean and log-variance of the approximate posterior q(z|x).
z_mean = Dense(latent_dim)(x)
z_log_var = Dense(latent_dim)(x)

# Reparameterization trick: express z = mean + sigma * eps with
# eps ~ N(0, I), so the stochastic node stays differentiable w.r.t.
# the encoder outputs.
def sampling(args):
    """Draw one latent sample from q(z|x) given (z_mean, z_log_var)."""
    mean, log_var = args
    eps = K.random_normal(shape=K.shape(mean))
    std = K.exp(0.5 * log_var)
    return mean + std * eps

# Reparameterization layer — effectively injects Gaussian noise into the code.
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])

# Decoder (generator): built as a standalone model first so it can later be
# called on its own to generate digits from latent samples.
latent_inputs = Input(shape=(latent_dim,))
x = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
x = Reshape((shape[1], shape[2], shape[3]))(x)

# Mirror the encoder: two transposed convs upsample back to 28x28.
# `filters` enters this loop at 32 (left over from the encoder) and is
# halved after each layer: 32 -> 16.
for i in range(2):
    x = Conv2DTranspose(filters=filters,
                        kernel_size=kernel_size,
                        activation='relu',
                        strides=2,
                        padding='same')(x)
    filters //= 2

# Final layer maps to a single sigmoid channel (per-pixel probabilities).
outputs = Conv2DTranspose(filters=1,
                          kernel_size=kernel_size,
                          activation='sigmoid',
                          padding='same')(x)

# Wrap the decoder as an independent model.
decoder = Model(latent_inputs, outputs)

x_out = decoder(z)

# Full conditional VAE: image + one-hot label in; reconstruction and the
# per-class latent mean out.
vae = Model([x_in, y], [x_out, yh])

# xent_loss is the reconstruction loss; kl_loss is the KL divergence of
# q(z|x) from the class-conditional prior centred at yh (hence z_mean - yh).
xent_loss = K.sum(K.binary_crossentropy(x_in, x_out), axis=[1, 2, 3])
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean-yh) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)

# add_loss attaches an arbitrary tensor as the training loss, which is more
# flexible than the usual compile(loss=...) route.
vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')
vae.summary()

# Training run (commented out — pre-trained weights are loaded below instead):
# history=vae.fit([x_train, y_train],
#         shuffle=True,
#         epochs=epochs,
#         batch_size=batch_size,
#         validation_data=([x_test, y_test], None))

# vae.save("cnnvae-200.h5")
# vae.save("cnnvae-100.h5")
# vae.load_weights("cnnvae-200.h5")
vae.load_weights("cnnvae-100.h5")
# Encoder model: maps an image to its latent mean; used to inspect how the
# digits are distributed in latent space.
encoder = Model(x_in, z_mean)

x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
# 2-D latent scatter plot (only meaningful when latent_dim == 2):
# plt.figure(figsize=(6, 6))
# plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
# plt.colorbar()
# plt.show()

# Per-class mean vectors: run every one-hot class through the yh Dense layer.
mu = Model(y, yh)
# np.eye(num_classes) is the identity matrix, i.e. all one-hot vectors at once.
mu = mu.predict(np.eye(num_classes))
# print(mu.size)
output_digit = 5
# Explore how varying latent dimensions changes the decoded output.
n = 15  # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))

# Build latent coordinates from standard-normal quantiles, optionally
# shifted by the chosen class mean — kept for reference, disabled here:
# grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
# grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
# grid_x = norm.ppf(np.linspace(0.05, 0.95, n)) + mu[output_digit][0]
# grid_y = norm.ppf(np.linspace(0.05, 0.95, n)) + mu[output_digit][1]
# grid_a = norm.ppf(np.linspace(0.05, 0.95, n)) + mu[output_digit][2]
# grid_b = norm.ppf(np.linspace(0.05, 0.95, n)) + mu[output_digit][3]
# grid_c = norm.ppf(np.linspace(0.05, 0.95, n)) + mu[output_digit][4]

# for i, yi in enumerate(grid_x):
#     for j, xi in enumerate(grid_y):
#         z_sample = np.array([[xi, yi]])
# x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
# y = np.array([14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
# for i, yi in enumerate(x):
#     for j, xi in enumerate(y):
#         z_sample = np.array([[grid_x[xi], grid_y[yi], grid_a[xi], grid_b[yi], grid_c[xi]]])
#         x_decoded = decoder.predict(z_sample)
#         digit = x_decoded[0].reshape(digit_size, digit_size)
#         figure[i * digit_size: (i + 1) * digit_size,
#                j * digit_size: (j + 1) * digit_size] = digit
#
# plt.figure(figsize=(10, 10))
# plt.imshow(figure, cmap='Greys_r')
# plt.show()

# Round-trip the test set: encode to latent means, then decode back.
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)


def plot_img(x_test, decoded_imgs, start=200, n=10):
    """Show `n` original test images (top row) above their reconstructions
    (bottom row), beginning at index `start`.

    Parameters
    ----------
    x_test : array of original images, each reshapeable to 28x28.
    decoded_imgs : array of reconstructions, indexed like `x_test`.
    start : index of the first image pair to display (default 200, matching
        the previous hard-coded offset).
    n : number of image pairs to display (default 10, as before).
    """
    # Guard against running past the end of the arrays.
    n = min(n, len(x_test) - start)
    plt.figure(figsize=(2 * n, 4))  # (20, 4) at the default n=10
    for i in range(n):
        # Original image on the top row.
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(x_test[start + i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Reconstruction on the bottom row.
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[start + i].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()


plot_img(x_test, decoded_imgs)

# Decode an n x n grid of latent vectors drawn from N(0, I) and tile the
# decoded digits into one large image.
n = 10
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# Draw all n*n latent samples up front and decode them with ONE batched
# predict call instead of n*n single-sample calls. np.random.randn fills
# row-major, so sample (i*n + j) consumes exactly the same random numbers
# as iteration (i, j) of the original per-sample loop.
z_samples = np.random.randn(n * n, latent_dim)
x_recon = decoder.predict(z_samples)
for i in range(n):
    for j in range(n):
        digit = x_recon[i * n + j].reshape(digit_size, digit_size)
        figure[i * digit_size: (i + 1) * digit_size,
               j * digit_size: (j + 1) * digit_size] = digit

plt.figure(figsize=(10, 10))
plt.axis("off")
plt.imshow(figure, cmap='Greys_r')
plt.show()



# fig = plt.figure()
# plt.plot(history.history['loss'],label='train loss')
# plt.plot(history.history['val_loss'], label='test loss')
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(loc='upper right')
# plt.show()
def plot_ori(decoded_img, start=100, cols=10, rows=2):
    """Tile a `rows` x `cols` grid of decoded digits (beginning at index
    `start`) into one image and display it.

    Parameters
    ----------
    decoded_img : array of decoded images, each reshapeable to 28x28.
    start : index of the first image to show (default 100, matching the
        previous hard-coded offset).
    cols, rows : grid dimensions (defaults 10x2, as before).
    """
    digit_size = 28
    figure = np.zeros((digit_size * rows, digit_size * cols))
    for i in range(rows):
        for j in range(cols):
            # BUG FIX: the original `print("%d %d", i, j)` printed the raw
            # format string followed by the indices instead of formatting
            # them; use real %-formatting. (The debug `print(img.shape)`
            # was dropped along with the redundant reshape to (-1, 784).)
            print("%d %d" % (i, j))
            img = decoded_img[i * cols + j + start].reshape(digit_size, digit_size)
            figure[i * digit_size: (i + 1) * digit_size,
                   j * digit_size: (j + 1) * digit_size] = img
    plt.figure(figsize=(10, 10))
    plt.imshow(figure, cmap='Greys_r')
    plt.axis("off")
    plt.show()

# Round-trip the test set through encoder + decoder (using the latent mean
# only, no sampling) and display a grid of the reconstructions.
decoded_img = decoder.predict(encoder.predict(x_test))
plot_ori(decoded_img)