import tensorflow as tf
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
from tools.o_gan_module.math import pearson_correlation
import time

# Shuffle buffer size for tf.data pipelines (see commented-out dataset below).
BUFFER_SIZE = 60000
# Number of samples per training batch.
BATCH_SIZE = 256
# Dimensionality of the latent noise vector fed to the generator.
NOISE_DIM = 8
# Dimensionality of one traffic span (generator output / discriminator input).
TRAFFIC_DIM = 16


# Batch and shuffle the data (example pipeline, currently unused):
# train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)


def generate_model():
    """Build the generator: maps a NOISE_DIM latent vector to a TRAFFIC_DIM span.

    Architecture: Dense(64) -> Dense(128) -> Dense(TRAFFIC_DIM), each followed
    by BatchNormalization and LeakyReLU, all without biases (BatchNorm supplies
    the shift).

    :return: an uncompiled ``tf.keras.Sequential`` model.
    """
    model = tf.keras.Sequential()

    # Only the first layer of a Sequential model needs input_shape; the
    # original passed it on every layer, where Keras silently ignores it.
    model.add(layers.Dense(64, use_bias=False, input_shape=(NOISE_DIM,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Dense(128, use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # NOTE(review): BatchNorm + LeakyReLU on the output layer constrains the
    # generated spans' distribution — confirm this is intended.
    model.add(layers.Dense(TRAFFIC_DIM, use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    return model


def discriminator_model():
    """Build the discriminator/encoder: maps a TRAFFIC_DIM span to a NOISE_DIM code.

    Architecture: Dense(64) -> Dense(128) -> Dense(256) -> Dense(NOISE_DIM),
    each followed by BatchNormalization and LeakyReLU, all without biases.
    The NOISE_DIM output lets it double as an encoder whose output is compared
    to the generator's input noise via Pearson correlation (O-GAN style).

    :return: an uncompiled ``tf.keras.Sequential`` model.
    """
    model = tf.keras.Sequential()

    # Only the first layer of a Sequential model needs input_shape; the
    # original passed it on every layer, where Keras silently ignores it.
    model.add(layers.Dense(64, use_bias=False, input_shape=(TRAFFIC_DIM,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Dense(128, use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Dense(256, use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Dense(NOISE_DIM, use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    return model


# Module-level singletons shared by train_step(): the two networks and one
# Adam optimizer per loss term (same 1e-4 learning rate for both).
d_model = discriminator_model()
g_model = generate_model()
optimizer_of_qr_loss = tf.keras.optimizers.Adam(1e-4)
optimizer_of_pearson_loss = tf.keras.optimizers.Adam(1e-4)


def train_step(traffic_spans):
    """Run a single training step over one batch of real traffic spans.

    Two losses are optimized jointly over all generator + discriminator
    variables, each with its own Adam optimizer:

    * qr_loss: mean discriminator score on generated spans minus the mean
      score on real spans.
    * pearson_loss: mean Pearson correlation between the input noise ``z``
      and the discriminator's code for the generated spans.

    :param traffic_spans: batch of real samples, shape [BATCH_SIZE, TRAFFIC_DIM].
    :return: None (updates model weights in place).
    """
    z = tf.random.normal([BATCH_SIZE, NOISE_DIM])
    with tf.GradientTape() as tape_qr_loss, tf.GradientTape() as tape_pearson_loss:
        generate_traffic_spans = g_model(z)

        fake_out = d_model(generate_traffic_spans)
        real_out = d_model(traffic_spans)

        fake_out_mean = tf.reduce_mean(fake_out, axis=1)
        real_out_mean = tf.reduce_mean(real_out, axis=1)

        # Reduce to a scalar loss. BUG FIX: the original differentiated
        # fake_out_mean alone, so the real-sample term (real_out_mean) never
        # contributed a gradient even though qrloss was computed.
        qr_loss = tf.reduce_mean(fake_out_mean - real_out_mean)

        pearson_loss = tf.reduce_mean(pearson_correlation(z, fake_out))
        # Variables to be trained (generator + discriminator).
        variables = g_model.trainable_variables + d_model.trainable_variables

    gradients_of_qr_loss = tape_qr_loss.gradient(qr_loss, variables)
    gradients_of_pearson_loss = tape_pearson_loss.gradient(pearson_loss, variables)

    optimizer_of_qr_loss.apply_gradients(
        zip(gradients_of_qr_loss, variables))
    optimizer_of_pearson_loss.apply_gradients(
        zip(gradients_of_pearson_loss, variables))


def train():
    """Run 100 training steps, each on a fresh random batch.

    NOTE(review): batches are drawn from a standard normal rather than a real
    dataset — presumably a placeholder until real traffic spans are wired in.

    :return: None
    """
    for _step in range(100):
        batch = tf.random.normal([BATCH_SIZE, TRAFFIC_DIM])
        train_step(batch)


# Script entry point: kick off training when run directly.
if __name__ == '__main__':
    train()
