import tensorflow as tf
from tensorflow.keras import layers, Model
from .model_trainner import ModelTrainner
from .traffic_model_param import TrafficModelParam
from .math import pearson_correlation
from typing import Union


def generate_model(input_dim: int, output_dim: int):
    """Build the generator network.

    Maps a noise vector of size ``input_dim`` to a traffic vector of size
    ``output_dim`` whose entries lie in (0, 1) thanks to the sigmoid output.
    """
    def _upsample_stage(filters: int):
        # One Conv1D -> BatchNorm -> LeakyReLU stage followed by 2x
        # upsampling along the sequence axis.
        return [
            layers.Conv1D(filters, kernel_size=4, padding="same"),
            layers.BatchNormalization(),
            layers.LeakyReLU(),
            layers.UpSampling1D(size=2),
        ]

    stack = [
        # Project the noise into an (8, 64) feature map.
        layers.Dense(64 * 8, use_bias=False, input_shape=(input_dim,)),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Reshape((8, 64)),
    ]
    stack += _upsample_stage(32)  # (8, 64)  -> (16, 32)
    stack += _upsample_stage(16)  # (16, 32) -> (32, 16)
    stack += _upsample_stage(16)  # (32, 16) -> (64, 16)
    stack += [
        layers.Flatten(),
        layers.Dense(output_dim, use_bias=False, activation="sigmoid"),
    ]

    return tf.keras.Sequential(stack)


def discriminator_model(input_dim: int, output_dim: int):
    """Build the discriminator network.

    Maps a traffic vector of size ``input_dim`` to a vector of size
    ``output_dim`` with sigmoid-activated outputs in (0, 1).
    """
    model = tf.keras.Sequential()

    for layer in (
        # Project the input into a (16, 64) feature map.
        layers.Dense(64 * 16, use_bias=False, input_shape=(input_dim,)),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Reshape((16, 64)),
        # Three same-padded convolutions narrow the channel count 64 -> 1.
        layers.Conv1D(32, kernel_size=4, padding="same"),  # (16, 32)
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Conv1D(16, kernel_size=4, padding="same"),  # (16, 16)
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Conv1D(1, kernel_size=4, padding="same"),  # (16, 1)
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Flatten(),
        layers.Dense(output_dim, use_bias=False, activation="sigmoid"),
    ):
        model.add(layer)

    return model


class TrafficModelTrainner(ModelTrainner):
    """GAN-style trainer for the traffic model.

    Trains a generator (``g_model``) and discriminator (``d_model``) pair
    with two separate Adam optimizers: one driving a critic-style
    ``qrloss`` and one driving a Pearson-correlation loss between the
    noise input and the discriminator's output on generated samples.
    """

    # Dimension of the generator's noise input.
    noise_dim: int
    # Dimension of a traffic vector (generator output / discriminator input).
    traffic_dim: int
    # Lazily created in train() if still None.
    g_model: Union[Model, None]
    d_model: Union[Model, None]

    def __init__(self, epochs: int, batch_size: int, param: TrafficModelParam):
        """
        Traffic model trainer.
        :param epochs: number of training iterations
        :param batch_size: batch size
        :param param: model hyper-parameters (noise and traffic dimensions)
        """
        super().__init__(epochs, batch_size)
        self.noise_dim = param.noise_dim
        self.traffic_dim = param.traffic_dim
        self.g_model = None
        self.d_model = None

    def train(self):
        """
        Train the models; the trained models end up in the member
        variables g_model and d_model.
        If the trainer's g_model is None a fresh g_model is created first;
        the same applies to d_model.
        :return:
        """
        if self.d_model is None:
            self.d_model = discriminator_model(self.traffic_dim, self.noise_dim)
        if self.g_model is None:
            self.g_model = generate_model(self.noise_dim, self.traffic_dim)
        optimizer_qrloss = tf.keras.optimizers.Adam(1e-4)
        optimizer_pearson_loss = tf.keras.optimizers.Adam(1e-4)

        for _ in range(self.epochs):
            # NOTE(review): real samples are drawn uniformly at random here —
            # presumably a placeholder until real traffic data is wired in.
            traffic_spans = tf.random.uniform([self.batch_size, self.traffic_dim])
            self.train_step(traffic_spans, optimizer_qrloss,
                            optimizer_pearson_loss)

    def train_step(self, traffic_spans, optimizer_qrloss: tf.keras.optimizers.Optimizer,
                   optimizer_pearson_loss: tf.keras.optimizers.Optimizer):
        """
        Run one optimization step on both losses.
        :param traffic_spans: batch of real traffic vectors,
            shape (batch_size, traffic_dim)
        :param optimizer_qrloss: optimizer applied to the critic loss
        :param optimizer_pearson_loss: optimizer applied to the Pearson loss
        """
        z = tf.random.uniform([self.batch_size, self.noise_dim], dtype=tf.dtypes.float32)
        with tf.GradientTape() as tape_qr_loss, tf.GradientTape() as tape_pearson_loss:
            generate_traffic_spans = self.g_model(z)

            fake_out = self.d_model(generate_traffic_spans)
            real_out = self.d_model(traffic_spans)

            fake_out_means = tf.reduce_mean(fake_out, axis=1)
            real_out_means = tf.reduce_mean(real_out, axis=1)

            # Critic-style loss: push fake scores down relative to real scores.
            qrloss = fake_out_means - real_out_means

            pearson_loss = tf.reduce_mean(pearson_correlation(z, fake_out))

        # No gradients flow through this list construction, so it can live
        # outside the tape contexts.
        variables = self.g_model.trainable_variables + self.d_model.trainable_variables
        # BUG FIX: previously the gradient was taken of fake_out_means alone,
        # so the real-sample term (real_out_means) never contributed gradient
        # and the computed qrloss was dead code. Differentiate the full loss.
        gradients_of_qrloss = tape_qr_loss.gradient(qrloss, variables)
        gradients_of_pearson_loss = tape_pearson_loss.gradient(pearson_loss, variables)

        optimizer_qrloss.apply_gradients(zip(gradients_of_qrloss, variables))
        optimizer_pearson_loss.apply_gradients(zip(gradients_of_pearson_loss, variables))
