import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import numpy as np
import glob
import os
import cv2

#%%  Preprocessing: load MNIST, upscale to 56x56, normalize to (-1, 1)
(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
# cv2.resize accepts (28, 28) grayscale arrays directly; no channel axis needed yet.
train_images = np.array([cv2.resize(img, (56, 56)) for img in train_images])
# BUG FIX: the original `... .astype('float32')-127.5/127.5` only subtracted 1.0
# because of operator precedence; images must actually be scaled into (-1, 1)
# to match the generator's tanh output range.
train_images = (train_images.reshape(train_images.shape[0], 56, 56, 1).astype('float32') - 127.5) / 127.5

BATCH_SIZE = 256
BUFFER_SIZE = 60000   # >= dataset size, so shuffling is over the full training set

datasets = tf.data.Dataset.from_tensor_slices(train_images)
datasets = datasets.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)

#%% Alternative data source (disabled): load grayscale training images from a local folder
# path = glob.glob('./imgs/*/*')
# train_images=[]
# for i in path:
#     img=cv2.imread(i)
#     img=cv2.resize(img,(28,28))[:,:,1].reshape(28,28,1)
#     train_images.append(img)


# BATCH_SIZE = 32
# BUFFER_SIZE = 60000

# train_images=np.array(train_images)
# datasets = tf.data.Dataset.from_tensor_slices(train_images)
# datasets = datasets.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)

#%%  
def generator_model():
    """Build the generator: 100-dim noise vector -> (56, 56, 1) image in (-1, 1)."""
    model = tf.keras.models.Sequential()
    # Project the 100-dim noise onto a 7*7*2 feature map.
    # FIX: the Dense layer previously had activation='tanh', so tanh ran both
    # before BatchNormalization and again afterwards; the activation belongs
    # after BN only (BN mitigates slow convergence / vanishing gradients).
    model.add(tf.keras.layers.Dense(input_dim=100, units=7 * 7 * 2))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.Activation('tanh'))
    model.add(tf.keras.layers.Reshape((7, 7, 2)))
    # Three upsample+conv stages double spatial size each time: 7 -> 14 -> 28 -> 56.
    model.add(tf.keras.layers.UpSampling2D(size=(2, 2)))
    model.add(tf.keras.layers.Conv2D(4, (3, 3), padding='same', activation='tanh'))
    model.add(tf.keras.layers.UpSampling2D(size=(2, 2)))
    model.add(tf.keras.layers.Conv2D(8, (3, 3), padding='same', activation='tanh'))
    model.add(tf.keras.layers.UpSampling2D(size=(2, 2)))
    model.add(tf.keras.layers.Conv2D(16, (3, 3), padding='same', activation='tanh'))
    # Final 1-channel projection; tanh keeps output in (-1, 1), matching the
    # normalized training images. (Also fixed inconsistent padding='Same'.)
    model.add(tf.keras.layers.Conv2D(1, (3, 3), padding='same', activation='tanh'))
    model.summary()
    return model



def discriminator_model():
    """Build the discriminator: (56, 56, 1) image -> probability that it is real."""
    model = tf.keras.models.Sequential()
    # BUG FIX: input_shape was (28, 28, 1), but both the resized real images and
    # the generator's output are 56x56. The Dense layer is built from the first
    # declared shape, so feeding 56x56 batches made the flattened size mismatch
    # and crash at run time.
    model.add(tf.keras.layers.Conv2D(16, (3, 3), padding='same', input_shape=(56, 56, 1), activation='tanh'))
    model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))   # halves height and width
    model.add(tf.keras.layers.Conv2D(8, (3, 3), padding='same', activation='tanh'))
    model.add(tf.keras.layers.AveragePooling2D(pool_size=(2, 2)))
    model.add(tf.keras.layers.Conv2D(4, (3, 3), padding='same', activation='tanh'))
    model.add(tf.keras.layers.Flatten())
    # Single sigmoid unit: probability the input is a real image.
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
    model.summary()
    return model



# The discriminator's last layer applies sigmoid, so the default
# from_logits=False is correct here (from_logits=True would only be needed
# if the discriminator emitted raw, unactivated scores).
cross_entropy=tf.keras.losses.BinaryCrossentropy()

def discriminator_loss(real_out,fake_out):  # discriminator should score real images as 1 and fakes as 0
    real_loss=cross_entropy(tf.ones_like(real_out),real_out)  # tf.ones_like: all-ones tensor with the same shape as real_out
    fake_loss=cross_entropy(tf.zeros_like(fake_out),fake_out)
    return real_loss+fake_loss
def generator_loss(fake_out):  # generator wants the discriminator to score its fakes as 1
    return cross_entropy(tf.ones_like(fake_out),fake_out)

# Separate Adam optimizers, one per network.
generator_opt=tf.keras.optimizers.Adam(1e-3)
discriminator_opt=tf.keras.optimizers.Adam(1e-3)


#%%  Training setup
EPOCHS=100
noise_dim=100   # length of the generator's input noise vector

num_exp_to_generate=16   # number of sample images rendered after each epoch

# 16 fixed noise vectors of length 100; reusing the same seed every epoch makes
# the rendered samples comparable across training.
seed=tf.random.normal([num_exp_to_generate,noise_dim])

generator=generator_model()        # build the generator
discriminator=discriminator_model()     # build the discriminator

def train_step(images):
    """Run one simultaneous optimization step for both networks on a real batch."""
    # BUG FIX: the noise batch previously used num_exp_to_generate (16), so the
    # discriminator saw up to BATCH_SIZE real images but only 16 fakes per step.
    # Match the actual real-batch size (the final batch may be smaller).
    noise = tf.random.normal([tf.shape(images)[0], noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        real_out = discriminator(images, training=True)

        gen_image = generator(noise, training=True)
        fake_out = discriminator(gen_image, training=True)

        gen_loss = generator_loss(fake_out)
        disc_loss = discriminator_loss(real_out, fake_out)
    # Gradients are taken outside the tape context; each optimizer updates only
    # its own network's variables.
    gradient_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradient_disc = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_opt.apply_gradients(zip(gradient_gen, generator.trainable_variables))
    discriminator_opt.apply_gradients(zip(gradient_disc, discriminator.trainable_variables))

def generator_plot_image(gen_model, test_noise):
    """Render the generator's output for `test_noise` as a 4x4 grid of grayscale images."""
    predictions = gen_model(test_noise, training=False)
    plt.figure(figsize=(4, 4))
    for idx in range(predictions.shape[0]):
        plt.subplot(4, 4, idx + 1)
        # Map the tanh output range (-1, 1) back to (0, 1) for display.
        plt.imshow((predictions[idx, :, :, 0] + 1) / 2, cmap='gray')
        plt.axis('off')  # hide axes on each tile
    plt.show()

def train(dataset, epochs):
    """Train for `epochs` full passes over `dataset`, plotting samples after each pass."""
    for _ in range(epochs):
        for batch in dataset:
            train_step(batch)
            print('.', end='')  # one dot per batch as a progress indicator
        # Always render from the same fixed seed so epoch-to-epoch progress is comparable.
        generator_plot_image(generator, seed)

# Kick off training; note this runs at import time since the file is a cell-style script.
train(datasets,EPOCHS)












