import tensorflow.keras as tf
import tensorflow.keras.layers as tklays
import tensorflow.keras.models as tkmods
import tensorflow.keras.optimizers as tkopts
import tensorflow.keras.activations as tkacts
import tensorflow.keras.callbacks as tkcbks
import tensorflow.keras.losses as tkloss
import numpy as np
import matplotlib.pyplot as plt
import sklearn.preprocessing as prep
from scipy.stats import norm

import pathlib
import datetime
import dataclasses

import config


@dataclasses.dataclass
class OneDConvParam:
    """Keyword arguments for a Keras Conv1D / Conv1DTranspose layer.

    Instances are unpacked into layer constructors via
    ``dataclasses.asdict`` (see the GAN builder methods).
    """
    filters: int      # number of output channels
    kernel_size: int  # 1-D convolution window length
    strides: int      # stride along the time axis
    padding: str      # padding mode, e.g. "SAME"


@dataclasses.dataclass
class ConParam:
    """Keyword arguments for a fully-connected (Dense) layer.

    Instances are unpacked into ``tklays.Dense`` via ``dataclasses.asdict``
    (see ``AutoEncoder.build_layer``).
    """
    units: int  # layer width


class GaussData:
    """Synthetic dataset of i.i.d. Gaussian series.

    Mimics TrafficData's sampling interface (``train_sample`` /
    ``valid_sample`` / ``test_sample``) so models can be smoke-tested
    without the real CSV files.
    """

    def __init__(self):
        self.mean = 0    # mean of the generated values
        self.dev = 1.0   # standard deviation of the generated values
        self.steps = 288  # time steps per series (matches TrafficData)
        self.num_test = self.num_valid = self.num_train = 128
        self.feature_dim = 1

    def __getattr__(self, typ):
        """Resolve the three sampling methods dynamically.

        Raises AttributeError (not AssertionError) for any other missing
        attribute so ``hasattr``, ``copy`` and ``pickle`` behave normally;
        an ``assert`` here would also be stripped under ``python -O``.
        """
        if typ not in ("train_sample", "valid_sample", "test_sample"):
            raise AttributeError(typ)
        return self._inner

    def _inner(self, batch_size):
        # All three samplers draw fresh noise of shape
        # (batch_size, steps, feature_dim).
        return np.random.normal(loc=self.mean, scale=self.dev, size=(batch_size, self.steps, self.feature_dim))


class TrafficData:
    """Traffic time-series dataset.

    Loads pre-split train/valid/test CSVs from ``data/``, min-max scales
    all splits with statistics fitted on the training split only, and
    serves cyclic mini-batches through dynamically resolved
    ``train_sample`` / ``valid_sample`` / ``test_sample`` methods
    (see ``__getattr__``).
    """

    def __init__(self):
        self.suffix_name = "_complementation_smoothed.csv"
        self.steps = 288  # time steps per series — presumably 5-min samples over a day; TODO confirm
        self.feature_dim = 1
        self.train_file = pathlib.Path("train" + self.suffix_name)
        self.test_file = pathlib.Path("test" + self.suffix_name)
        self.valid_file = pathlib.Path("valid" + self.suffix_name)
        self.base_path = pathlib.Path("data")
        self.train_data = np.loadtxt(self.base_path / self.train_file, delimiter=",")
        self.valid_data = np.loadtxt(self.base_path / self.valid_file, delimiter=",")
        self.test_data = np.loadtxt(self.base_path / self.test_file, delimiter=",")
        self.num_train = self.train_data.shape[0]
        self.num_valid = self.valid_data.shape[0]
        self.num_test = self.test_data.shape[0]
        # Per-split cursors for the cyclic batch samplers.
        self.train_pt = self.valid_pt = self.test_pt = 0
        # Fit the scaler on the training split only so valid/test statistics
        # do not leak into normalization.
        self.normalizer = prep.MinMaxScaler((0, 1))
        self.normalizer.fit(self.train_data.reshape(-1, 1))
        self._normalize()
        # For AE only: reconstruction task, so input and target coincide.
        self.train_x = self.train_y = self.normed_train

    def _normalize(self):
        """Scale every split to [0, 1] and reshape to (num, steps, feature_dim)."""
        self.normed_train = self.normalizer.transform(self.train_data.reshape(-1, 1)).reshape((self.num_train, self.steps, self.feature_dim))
        self.normed_valid = self.normalizer.transform(self.valid_data.reshape(-1, 1)).reshape((self.num_valid, self.steps, self.feature_dim))
        self.normed_test = self.normalizer.transform(self.test_data.reshape(-1, 1)).reshape((self.num_test, self.steps, self.feature_dim))

    def sample(self, data, pt_name, total):
        """Return a sampler closure yielding cyclic batches from *data*.

        The cursor attribute named *pt_name* is stored on ``self`` so
        successive calls continue where the last batch ended, wrapping
        around at *total* rows.
        """
        def inner(batch_size):
            pt = getattr(self, pt_name)
            if pt + batch_size > total:
                # Wrap around: tail of the data followed by the head.
                sample = np.concatenate(
                    (data[pt:],
                        data[:(pt + batch_size) % total]),
                    axis=0,
                )
            else:
                sample = data[pt:pt + batch_size]
            pt = (pt + batch_size) % total
            setattr(self, pt_name, pt)
            return sample
        return inner

    def single_sample(self, ind=None):
        """Return one raw (un-normalized) training series; random if *ind* is None."""
        if ind is None:
            ind = np.random.randint(0, self.num_train)
        return self.train_data[ind]

    def __getattr__(self, typ):
        """Resolve the three sampling methods dynamically.

        Raises AttributeError (not AssertionError) for any other missing
        attribute so ``hasattr``, ``copy`` and ``pickle`` behave normally;
        an ``assert`` here would also be stripped under ``python -O``.
        """
        if typ not in ("train_sample", "valid_sample", "test_sample"):
            raise AttributeError(typ)
        typ = typ.split("_")[0]
        data = getattr(self, "normed_" + typ)
        pt_name = typ + "_pt"
        total = getattr(self, "num_" + typ)
        return self.sample(data, pt_name, total)


class AutoEncoder:
    """Dense (fully-connected) autoencoder over (steps, feature_dim) series.

    Trains on the dataset's reconstruction task, i.e. input == target
    (``dataset.train_x is dataset.train_y``).
    """

    def __init__(self, dataset, anchor=2, load_weights=False):
        """
        Args:
            dataset: object exposing ``steps``, ``train_x`` and ``train_y``.
            anchor: index of the layer to treat as the encoder output;
                not wired up yet (TODO in original).
            load_weights: if True, restore weights from ``self.model_path``.
        """
        self.dataset = dataset
        self.feature_dim = 1
        self.dropout_rate = 0.5
        self.leaky = 0.2
        self.momentum = 0.8  # unused here; kept for parity with GANModel
        self.batch_size = config.batch_size
        self.lr = 1e-3
        self.epochs = 100
        self.steps = self.dataset.steps
        # Symmetric encoder/decoder stack; the 32-unit layer is the bottleneck.
        self.layers = [
            ConParam(units=288),
            ConParam(units=128),
            ConParam(units=32),
            ConParam(units=128),
            ConParam(units=288),
        ]
        self.anchor = anchor  # Define which layer as the output of the encoder, TODO
        self.model_path = pathlib.Path('./models')
        self.layer_num = len(self.layers)
        self.outputs = self.build_layer()
        self.model = tkmods.Model(inputs=self.inputs, outputs=self.outputs)
        self.optimizer = tkopts.Adam(learning_rate=self.lr)
        self.loss = tkloss.MeanSquaredError()
        self.model.compile(optimizer=self.optimizer, loss=self.loss)
        self.model.summary()
        if load_weights:
            # NOTE(review): loads from a directory path — assumes TF
            # checkpoint/SavedModel format; confirm against how weights
            # were saved by ModelCheckpoint below.
            self.model.load_weights(self.model_path)
        self.date_str = "%Y-%m-%d-%H-%M-%S"
        self.today = datetime.datetime.now().strftime(self.date_str)
        self.callbacks = [
            tkcbks.TensorBoard(log_dir=f'./logs/{self.today}'),
            tkcbks.ModelCheckpoint(filepath=self.model_path),
        ]

    def build_layer(self):
        """Build the Dense/Dropout/LeakyReLU stack; sets ``self.inputs``.

        Returns the output tensor of the final layer (linear — no LeakyReLU
        after the last Dense).
        """
        x = self.inputs = tklays.Input(shape=(self.steps, self.feature_dim))
        for ind, l in enumerate(self.layers):
            x = tklays.Dense(**dataclasses.asdict(l))(x)
            x = tklays.Dropout(rate=self.dropout_rate)(x)
            if ind != self.layer_num - 1:
                x = tklays.LeakyReLU(alpha=self.leaky)(x)
        return x

    def train(self):
        """Fit the model on the reconstruction task.

        Returns:
            The Keras ``History`` object, so callers can inspect the loss
            curve (the original discarded it into an unused local).
        """
        return self.model.fit(
            self.dataset.train_x,
            self.dataset.train_y,
            epochs=self.epochs,
            batch_size=self.batch_size,
            callbacks=self.callbacks,
        )


class GANModel:
    """1-D convolutional GAN over (steps, feature_dim) series.

    The generator upsamples a latent vector with Conv1DTranspose layers;
    the discriminator downsamples with Conv1D layers to a single sigmoid
    real/fake score. Training alternates discriminator and generator
    updates (see ``train``).
    """

    def __init__(self, dataset, noise_dim=18, z_type="Gauss"):
        """
        Args:
            dataset: object exposing ``feature_dim``, ``steps``, the
                ``*_sample`` samplers and ``num_valid`` / ``num_test``.
            noise_dim: length of the latent vector.
            z_type: latent distribution, "Gauss" or "Uniform".
        """
        self.dataset = dataset
        self.feature_dim = self.dataset.feature_dim
        self.steps = self.dataset.steps
        self.batch_size = config.batch_size
        self.noise_dim = noise_dim
        self.z_type = z_type
        self.generator_dense_units = 16
        self.in_channels = 16  # self.generator_dense_units // self.noise_dim
        # Transposed-conv stack; with noise_dim=18 the four stride-2 layers
        # upsample 18 -> 288 (= 18 * 2**4) positions before the final reshape.
        self.generator_layers = [
            OneDConvParam(filters=16, kernel_size=3, strides=2, padding="SAME"),
            OneDConvParam(filters=8, kernel_size=2, strides=2, padding="SAME"),
            OneDConvParam(filters=4, kernel_size=2, strides=2, padding="SAME"),
            OneDConvParam(filters=2, kernel_size=2, strides=2, padding="SAME"),
            OneDConvParam(filters=1, kernel_size=2, strides=1, padding="SAME"),
        ]
        self.discriminator_layers = [
            OneDConvParam(filters=8, kernel_size=2, strides=2, padding="SAME"),
            OneDConvParam(filters=4, kernel_size=2, strides=2, padding="SAME"),
            OneDConvParam(filters=2, kernel_size=2, strides=2, padding="SAME"),
            OneDConvParam(filters=1, kernel_size=2, strides=2, padding="SAME"),
        ]

        self.leaky = 0.2
        self.momentum = 0.8
        self.lr = 1e-3
        self.train_epoch = config.train_epoch
        self.info_times = 10
        self.info_epoch = self.train_epoch // self.info_times
        self.valid_times = 10
        self.valid_epoch = self.train_epoch // self.valid_times

        # One optimizer per compiled model: sharing a single Adam instance
        # between two models conflates their slot variables. Use the
        # supported `learning_rate` kwarg (`lr` is deprecated/removed in
        # modern Keras and inconsistent with AutoEncoder above).
        self.optimizer = tkopts.Adam(learning_rate=self.lr)
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss="binary_crossentropy", optimizer=self.optimizer, metrics=["accuracy"])

        # Freeze the discriminator inside the combined model so GAN updates
        # only train the generator.
        self.discriminator.trainable = False

        self.generator = self.build_generator()

        gened_data = self.generator(self.generator_ipt)
        ganoutput = self.discriminator(gened_data)
        self.ganmodel = tkmods.Model(inputs=self.generator_ipt, outputs=ganoutput, name="GAN")
        self.ganmodel.compile(loss="binary_crossentropy", optimizer=tkopts.Adam(learning_rate=self.lr), metrics=["accuracy"])
        self.ganmodel.summary()

        # Filesystem-safe timestamp (str(datetime) contains ':' and spaces,
        # which breaks log dirs on some platforms; matches AutoEncoder).
        time_str = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        self.tensorboard_cb = tkcbks.TensorBoard(log_dir=f"logs/{time_str}")

    def build_generator(self):
        """Build the Conv1DTranspose generator; also sets ``self.generator_ipt``.

        Maps (noise_dim, feature_dim) latent input to a
        (steps, feature_dim) series.
        """
        self.generator_ipt = tklays.Input((self.noise_dim, self.feature_dim))
        generator_model = tkmods.Sequential(name="Generator")
        generator_model.add(self.generator_ipt)
        generator_model.add(tklays.Dense(self.generator_dense_units, input_shape=(self.noise_dim, self.feature_dim)))
        for l in self.generator_layers:
            generator_model.add(tklays.Conv1DTranspose(**dataclasses.asdict(l)))
            generator_model.add(tklays.LeakyReLU(alpha=self.leaky))
            generator_model.add(tklays.BatchNormalization(momentum=self.momentum))
        generator_model.add(tklays.Flatten())
        generator_model.add(tklays.Reshape((self.steps, self.feature_dim)))
        generator_model.summary()
        return generator_model

    def build_discriminator(self):
        """Build the Conv1D discriminator; also sets ``self.discriminator_ipt``.

        Maps a (steps, feature_dim) series to a single sigmoid score.
        """
        self.discriminator_ipt = tklays.Input((self.steps, self.feature_dim))
        discriminator_model = tkmods.Sequential(name="Discriminator")
        discriminator_model.add(self.discriminator_ipt)
        for l in self.discriminator_layers:
            discriminator_model.add(tklays.Conv1D(**dataclasses.asdict(l)))
            discriminator_model.add(tklays.LeakyReLU(alpha=self.leaky))
            discriminator_model.add(tklays.BatchNormalization(momentum=self.momentum))
        discriminator_model.add(tklays.Flatten())
        discriminator_model.add(tklays.Dense(1, activation="sigmoid"))
        discriminator_model.summary()
        return discriminator_model

    def noise(self, batch_size=64):
        """Sample a (batch_size, noise_dim, feature_dim) latent batch.

        Raises:
            ValueError: if ``self.z_type`` is not "Uniform" or "Gauss"
                (the original silently returned None).
        """
        if self.z_type == "Uniform":
            return np.random.uniform(0, 1, (batch_size, self.noise_dim, self.feature_dim))
        elif self.z_type == "Gauss":
            return np.random.normal(loc=0, scale=1, size=(batch_size, self.noise_dim, self.feature_dim))
        raise ValueError(f"Unknown z_type: {self.z_type!r}")

    def pretrain(self):
        """ Pretrain the discriminator """
        pass

    def train(self):
        """Alternate discriminator / generator updates for ``train_epoch`` steps.

        Returns:
            The fake batch produced by the final ``test()`` call.
        """
        # Fixed labels: first half real (1), second half generated (0).
        basic_labels = np.concatenate((np.ones((self.batch_size // 2, 1, 1)), np.zeros((self.batch_size // 2, 1, 1))), axis=0)
        discriminator_losses = np.zeros((self.train_epoch, 1))
        gan_losses = np.zeros((self.train_epoch, 1))
        discriminator_acc = np.zeros((self.train_epoch, 1))
        gan_acc = np.zeros((self.train_epoch, 1))
        for epoch in range(self.train_epoch):
            real_data = self.dataset.train_sample(batch_size=self.batch_size // 2)
            noise_data = self.noise(batch_size=self.batch_size // 2)
            generated_data = self.generator.predict_on_batch(noise_data)
            # Append the labels as one extra "time step" so data and labels
            # can be shuffled together in lock-step, then split back apart.
            train_data = np.concatenate((np.concatenate((real_data, generated_data), axis=0), basic_labels), axis=1)
            np.random.shuffle(train_data)
            train_data, labels = train_data[:, :-1], train_data[:, -1]
            self.discriminator.trainable = True
            discriminator_loss, discriminator_accuracy = self.discriminator.train_on_batch(x=train_data, y=labels)
            discriminator_losses[epoch] = discriminator_loss
            discriminator_acc[epoch] = discriminator_accuracy
            self.discriminator.trainable = False
            # Generator step: label everything "real" (1) to push the frozen
            # discriminator's output toward 1 on generated data.
            generated_labels = np.ones((self.batch_size, 1))
            noise_data = self.noise(batch_size=self.batch_size)
            gan_loss, gan_accuracy = self.ganmodel.train_on_batch(x=noise_data, y=generated_labels)
            gan_losses[epoch] = gan_loss
            gan_acc[epoch] = gan_accuracy
            if not epoch % self.info_epoch:
                print(f"Training Epoch: {epoch},"
                      f" Discriminator loss: {discriminator_loss},"
                      f" Generator loss: {gan_loss}")
            if not epoch % self.valid_epoch:
                self.valid()
        return self.test()

    def valid(self):
        """Print discriminator accuracy on real validation data vs. fresh fakes."""
        valid_size = self.dataset.num_valid
        valid_data = self.dataset.valid_sample(valid_size)
        noise_data = self.noise(valid_size)
        fake_data = self.generator.predict_on_batch(noise_data)
        discriminator_true = self.discriminator.predict_on_batch(valid_data)
        discriminator_fake = self.discriminator.predict_on_batch(fake_data)
        true_accuracy = (discriminator_true[discriminator_true > 0.5]).shape[0] / valid_size
        fake_accuracy = (discriminator_fake[discriminator_fake < 0.5]).shape[0] / valid_size
        print(f"Validation True Accuracy:{true_accuracy}; Fake Accuracy: {fake_accuracy}")

    def test(self):
        """Print discriminator accuracy on the test split and return the fakes."""
        test_size = self.dataset.num_test
        test_data = self.dataset.test_sample(test_size)
        noise_data = self.noise(test_size)
        fake_data = self.generator.predict_on_batch(noise_data)
        discriminator_true = self.discriminator.predict_on_batch(test_data)
        discriminator_fake = self.discriminator.predict_on_batch(fake_data)
        true_accuracy = (discriminator_true[discriminator_true > 0.5]).shape[0] / test_size
        fake_accuracy = (discriminator_fake[discriminator_fake < 0.5]).shape[0] / test_size
        print(f"Test True Accuracy:{true_accuracy}; Fake Accuracy: {fake_accuracy}")
        return fake_data


def main():
    """Train the dense autoencoder on the traffic dataset."""
    dataset = TrafficData()
    ae = AutoEncoder(dataset)
    ae.train()


# Guard the training side effects so importing this module (e.g. to reuse
# the dataset or model classes) does not kick off a training run.
if __name__ == "__main__":
    main()

# test_data_den, _ = np.histogram(test_data.flatten(), bins=100, density=True)
# plt.figure()
# plt.plot(test_data_den)
# plt.show()


