# -*- encoding: utf-8 -*-
'''
@File    :   dagmm.py
@Time    :   2021/11/23 16:01
@Author  :   ZhangChaoYang
@Desc    :   Deep Autoencoding Gaussian Mixture Model https://sites.cs.ucsb.edu/~bzong/doc/iclr18-dagmm.pdf
'''

import os
import sys

sys.path.insert(0, os.getcwd())
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Softmax, Dropout, BatchNormalization
import tensorflow_addons as tfa
from models.encoders import FCEncoder
from models.decoders import FCDecoder
from models import losses
from util.view import draw_train_history
from tensorflow.keras.models import Sequential
from util.err_analyze import fit_err_percentage
from tensorflow.keras.callbacks import History
from anomaly_detection.gmm import GMM
from util.work_flow import preprocess, gen_outfile, save_model, load_model, train_test_split, preprocess1
import pickle

from absl import flags
from absl import app

FLAGS = flags.FLAGS

# Command-line flags. Help text stays in Chinese to match the project's CLI
# conventions; the data_dim/data_trans descriptions previously said '数据集'
# ("corpus") due to a copy-paste error and are corrected here.
flags.DEFINE_string('corpus', "jiangnan", '数据集，可选cwru、jiangnan、paderborn')
flags.DEFINE_string('data_dim', "1d", '数据维度，可选1d、2d')
flags.DEFINE_string('data_trans', "original",
                    '数据变换，data_dim=1d时data_trans可选original、fft、stat，data_dim=2d时data_trans可选sfft、cwt')


class Compression(Model):
    """Autoencoder half of DAGMM.

    Encodes the input into a low-dimensional latent code and reconstructs
    the input from it; both the code and the reconstruction are returned so
    the caller can build the DAGMM latent vector z and the reconstruction
    features.
    """

    def __init__(self, input_shape, hidden_dims):
        super(Compression, self).__init__()
        # Fully-connected encoder/decoder pair; both use ReLU + batch norm.
        self.encoder = FCEncoder(hidden_dims, activation=tf.nn.relu, batch_norm=True)
        self.decoder = FCDecoder(input_shape, hidden_dims, activation=tf.nn.relu, batch_norm=True)

    def call(self, inputs, training=None, mask=None):
        """Return (latent code, reconstruction) for the given inputs."""
        latent = self.encoder(inputs)
        reconstruction = self.decoder(latent)
        return latent, reconstruction


class Estimation(Model):
    """Estimation network of DAGMM.

    A small MLP that maps the latent vector z to a softmax over GMM
    components (the membership probabilities gamma). hidden_dims[-1] is the
    number of components.
    """

    def __init__(self, hidden_dims, lambda1=0.1, lambda2=0.005):
        super(Estimation, self).__init__()
        # Stored for interface compatibility with DAGMM; not used by this net.
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        stack = []
        # Hidden layers: batch norm followed by a tanh dense layer each.
        for width in hidden_dims[:-1]:
            stack.extend([BatchNormalization(), Dense(units=width, activation=tf.nn.tanh)])
        # Output head: batch norm, dropout, linear projection, softmax.
        stack.extend([
            BatchNormalization(),
            Dropout(0.5),
            Dense(units=hidden_dims[-1]),
            Softmax(),
        ])
        self.main = Sequential(stack)

    def call(self, inputs, training=None, mask=None):
        """Return the component membership probabilities for each input row."""
        return self.main(inputs)


class DAGMM(Model):
    """Deep Autoencoding Gaussian Mixture Model (Zong et al., ICLR 2018).

    Couples a compression (autoencoder) network with an estimation network
    that predicts GMM membership probabilities for the latent vector z; the
    joint loss is reconstruction error + lambda1 * GMM energy + lambda2 *
    covariance-diagonal penalty.
    """

    def __init__(self, input_shape, hidden_dims, estimater_dims, lambda1=0.1, lambda2=0.005):
        '''
        If hidden_dims[-1] (the dimensionality of z) is too large, the matrix
        solved when fitting the GMM easily becomes singular.
        With lambda1=0.1, lambda2=0.005 the log_loss and diag_loss do drop,
        but reconstruct_loss stays flat. Setting lambda1 and lambda2 very
        small lets reconstruct_loss slowly decrease, but log_loss then rises.
        '''
        super(DAGMM, self).__init__()
        self.lambda1 = lambda1  # weight of the GMM energy term in the total loss
        self.lambda2 = lambda2  # weight of the covariance-diagonal penalty
        self.compresser = Compression(input_shape, hidden_dims)
        self.estimater = Estimation(estimater_dims, lambda1, lambda2)
        self.K = estimater_dims[-1]  # width of the estimator's last layer = number of GMM components

    def call(self, inputs, training=None, mask=None):
        """Forward pass: return (reconstruction, gamma, z).

        z concatenates the latent code with two reconstruction features
        (relative Euclidean distance and cosine distance); gamma is the
        estimator's soft GMM-component assignment for each sample.
        """
        zc, x_hat = self.compresser(inputs)

        def euclid_norm(x):
            # L2 norm along axis 1.
            return tf.sqrt(tf.reduce_sum(tf.square(x), axis=1))

        inputs_norm = euclid_norm(inputs)
        x_hat_norm = euclid_norm(x_hat)
        dist_x = euclid_norm(inputs - x_hat)
        dot_x = tf.reduce_sum(inputs * x_hat, axis=1)
        relative_euclidean_distance = dist_x / inputs_norm  # observed to stay almost exactly 1 during training
        cosin_distance = dot_x / (inputs_norm * x_hat_norm)  # observed to stay very close to 0 during training
        # NOTE(review): the concat assumes the two distance terms have the same
        # rank as zc along axis 1 — this holds for the rank-3 inputs used by
        # main(); confirm before reusing with rank-2 inputs.
        z = tf.concat([zc, relative_euclidean_distance, cosin_distance], axis=1)
        gamma = self.estimater(z)  # early in training gamma is near-uniform; later it collapses, one dim -> 1 and the rest -> 0
        return x_hat, gamma, z

    def get_loss(self, x, x_hat, gamma, z):
        """Return (reconstruct_loss, energy, diag_loss) for one batch.

        Fits a fresh GMM to (z, gamma) each call and evaluates the sample
        energy and the covariance-diagonal penalty on it.
        """
        # reconstruct_loss = tf.reduce_mean(losses.square_loss(x, x_hat), axis=0)  # mean reconstruction error over all samples
        reconstruct_loss = 0  # reconstruction loss would not optimize anyway, so it is dropped from the objective
        gmm = GMM()
        gmm.fit(z, gamma)
        energy = tf.reduce_mean(gmm.energy(z))
        diag_loss = gmm.cov_diag_loss()
        return (reconstruct_loss, energy, diag_loss)

    def train(self, X_train, X_test, batch_size, epochs, optimizer, chart_file=""):
        """In-memory mini-batch training loop.

        Shuffles X_train in place each epoch, optimizes the joint loss with
        the given optimizer, evaluates on X_test once per epoch, and finally
        draws the train/val loss curves to chart_file.
        """
        n = X_train.shape[0]
        train_losses = []
        test_losses = []
        for epoch in range(epochs):
            np.random.shuffle(X_train)  # reshuffle the training samples every epoch (mutates X_train in place)
            loss_list = []
            energy_loss_list = []
            reconstruct_loss_list = []
            diag_loss_list = []
            for begin in range(0, n, batch_size):
                end = begin + batch_size
                if end > n:
                    end = n
                x = X_train[begin:end]
                # The compression net and estimation net are optimized jointly.
                with tf.GradientTape() as tape:
                    x_hat, gamma, z = self(x)  # forward pass
                    reconstruct_loss, energy, diag_loss = self.get_loss(x, x_hat, gamma, z)  # compute losses
                    loss = reconstruct_loss + self.lambda1 * energy + self.lambda2 * diag_loss
                    # print("train z", np.mean(z, axis=0))
                    # print("train gamma", np.mean(gamma, axis=0))
                    loss_list.append(loss)
                    energy_loss_list.append(energy)
                    reconstruct_loss_list.append(reconstruct_loss)
                    diag_loss_list.append(diag_loss)
                grads = tape.gradient(loss, self.trainable_variables)  # compute gradients
                optimizer.apply_gradients(zip(grads, self.trainable_variables))  # apply gradients (backprop step)
            curr_loss = np.mean(loss_list)
            curr_energy_loss = np.mean(energy_loss_list)
            curr_reconstruct_loss = np.mean(reconstruct_loss_list)
            curr_diag_loss = np.mean(diag_loss_list)
            x_hat, gamma, z = self(X_test)  # forward pass on the validation set
            reconstruct_loss, energy, diag_loss = self.get_loss(X_test, x_hat, gamma, z)  # compute validation losses
            test_loss = reconstruct_loss + self.lambda1 * energy + self.lambda2 * diag_loss
            train_losses.append(curr_loss)
            test_losses.append(test_loss)
            # print("test z", np.mean(z, axis=0))
            # print("tets gamma", np.mean(gamma, axis=0))
            print(
                "epoch {} loss {} reconstruct_loss {} log_loss {} diag_loss {} val_loss {} val_reconstruct_loss {} val_log_loss {} val_diag_loss {}".format(
                    epoch,
                    curr_loss,
                    curr_reconstruct_loss,
                    curr_energy_loss,
                    curr_diag_loss,
                    test_loss,
                    reconstruct_loss,
                    energy,
                    diag_loss),
                flush=True)

        history = History()
        history.history = {"loss": train_losses, "val_loss": test_losses}
        draw_train_history(history, chart_file=chart_file)

    def train4stream(self, X_generator, X_test, batch_size, optimizer, chart_file=""):
        '''
        Streaming training: saves memory by trading time for space.
        Accumulates samples from X_generator into batches of batch_size,
        takes one optimizer step per batch, and evaluates/logs every 100
        steps. A trailing partial batch (< batch_size) is discarded.
        '''
        train_loss_list = []
        train_energy_loss_list = []
        train_reconstruct_loss_list = []
        train_diag_loss_list = []
        train_loss_history = []
        train_energy_loss_history = []
        train_reconstruct_loss_history = []
        test_loss_history = []
        test_energy_loss_history = []
        test_reconstruct_loss_history = []
        buffer = []
        step = 0
        for X in X_generator:
            buffer.append(X)
            if len(buffer) < batch_size:
                continue
            x = np.stack(buffer, axis=0)
            buffer = []
            step += 1
            # The compression net and estimation net are optimized jointly.
            with tf.GradientTape() as tape:
                x_hat, gamma, z = self(x)  # forward pass

                reconstruct_loss, energy, diag_loss = self.get_loss(x, x_hat, gamma, z)  # compute losses
                loss = reconstruct_loss + self.lambda1 * energy + self.lambda2 * diag_loss
                train_loss_list.append(loss)
                train_energy_loss_list.append(energy)
                train_reconstruct_loss_list.append(reconstruct_loss)
                train_diag_loss_list.append(diag_loss)
            grads = tape.gradient(loss, self.trainable_variables)  # compute gradients
            optimizer.apply_gradients(zip(grads, self.trainable_variables))  # apply gradients (backprop step)

            if step % 100 == 0:
                print("train z", np.mean(z, axis=0))
                print("train gamma", np.mean(gamma, axis=0))
                curr_loss = np.mean(train_loss_list)
                curr_energy_loss = np.mean(train_energy_loss_list)
                curr_reconstruct_loss = np.mean(train_reconstruct_loss_list)
                curr_diag_loss = np.mean(train_diag_loss_list)
                train_loss_list = []
                train_energy_loss_list = []
                train_reconstruct_loss_list = []
                train_diag_loss_list = []
                train_loss_history.append(curr_loss)
                train_energy_loss_history.append(curr_energy_loss)
                train_reconstruct_loss_history.append(curr_reconstruct_loss)

                x_hat, gamma, z = self(X_test)  # forward pass on the validation set
                print("test z", np.mean(z, axis=0))
                print("tets gamma", np.mean(gamma, axis=0))
                reconstruct_loss, energy, diag_loss = self.get_loss(X_test, x_hat, gamma, z)  # compute validation losses
                test_loss = reconstruct_loss + self.lambda1 * energy + self.lambda2 * diag_loss
                test_loss_history.append(test_loss)
                test_energy_loss_history.append(energy)
                test_reconstruct_loss_history.append(reconstruct_loss)

                print(
                    "step {} loss {} reconstruct_loss {} log_loss {} diag_loss {} val_loss {} val_reconstruct_loss {} val_log_loss {} val_diag_loss {}".format(
                        step,
                        curr_loss,
                        curr_reconstruct_loss,
                        curr_energy_loss,
                        curr_diag_loss,
                        test_loss,
                        reconstruct_loss,
                        energy,
                        diag_loss),
                    flush=True)

        history = History()
        history.history = {"loss": train_loss_history, "val_loss": test_loss_history,
                           "energy": train_energy_loss_history, "val_energy": test_energy_loss_history,
                           "recon": train_reconstruct_loss_history, "val_recon": test_reconstruct_loss_history}
        draw_train_history(history, chart_file=chart_file)


def main(argv):
    """End-to-end pipeline: load data, train DAGMM, pick an energy threshold
    on normal samples, evaluate recall on anomalies, and plot the result.

    Reads corpus/data_dim/data_trans from absl FLAGS; writes model, GMM,
    optimizer config, training-history chart, and scatter plot to files
    produced by gen_outfile.
    """
    corpus = FLAGS.corpus
    data_dim = FLAGS.data_dim
    data_trans = FLAGS.data_trans
    learning_rate = 1e-4
    batch_size = 256
    epochs = 100  # Sample counts differ per corpus: without splitting by operating condition, use 5 epochs for paderborn and 30 for cwru/jiangnan. When fine-tuning from a general model, set epochs very small.
    model_name = "dagmm"

    normal_data_files, anomaly_data_file, train_history_file, check_file, model_file, scaler_file = gen_outfile(
        data_dim,
        corpus,
        data_trans,
        model_name,
    )
    print(check_file)
    print(train_history_file)

    # X: normal samples, ano_X: anomalous samples.
    X, ano_X = preprocess(normal_data_files, anomaly_data_file, data_dim)
    X_train, X_test = train_test_split(X)

    n_component = 2
    gmm = GMM()
    optimizer = tfa.optimizers.AdamW(learning_rate=learning_rate, weight_decay=1E-4)
    model = DAGMM(input_shape=(X.shape[-2], X.shape[-1]), hidden_dims=[128, 16],
                  estimater_dims=[3 * n_component, n_component],
                  # lambda2=1E-13  # when fine-tuning from a general model, set lambda2 very small so diag_loss does not move the parameters
                  )

    model.train(X_train, X_test, batch_size=batch_size, epochs=epochs, optimizer=optimizer,
                chart_file=train_history_file)
    save_model(model, model_file)
    # Persist the optimizer config next to the model for later fine-tuning.
    with open(os.path.join(model_file, "opt"), "wb") as fout:
        pickle.dump(optimizer.get_config(), fout, protocol=pickle.HIGHEST_PROTOCOL)
    # saved_model_cli show --dir $model_file --tag_set serve --signature_def serving_default
    model = load_model(model_file)

    print("正常样本")
    x_hat, gamma, normal_z = model(X)
    fit_err_percentage(X, x_hat, losses.square_loss)  # autoencoder fit error on normal samples
    gmm.fit(normal_z, gamma)
    # DAGMM recommends thresholding on the energy loss to separate normal from anomalous samples.
    energy = gmm.energy(normal_z)
    print("energy loss")
    sl = sorted(energy.numpy().tolist())
    # Print the energy distribution of normal samples at 5% percentile steps.
    for p in range(0, 100, 5):
        index = int(p / 100.0 * len(sl))
        print("{}%\t{:.2e}".format(p, sl[index]))
    print("100%\t{:.2e}".format(sl[-1]))
    gmm.threshold = sl[int(0.9 * len(sl))]  # use the 90th percentile of normal-sample energy as the decision threshold
    print("threshold", gmm.threshold)
    print("正常样本召回率{:.2f}%".format(100 * np.count_nonzero(np.asarray(sl) <= gmm.threshold) / len(sl)))
    # Round-trip save/load of the fitted GMM (also verifies serialization).
    gmm.save(os.path.join(model_file, "gmm"))
    gmm.load(os.path.join(model_file, "gmm"))

    import matplotlib.pyplot as plt
    print("异常样本")
    ano_x_hat, gamma, ano_z = model(ano_X)
    fit_err_percentage(ano_X, ano_x_hat, losses.square_loss)  # autoencoder fit error on anomalous samples

    # DAGMM recommends thresholding on the energy loss to separate normal from anomalous samples.
    energy = gmm.energy(ano_z)
    print("energy loss")
    ano_sl = sorted(energy.numpy().tolist())
    for p in range(0, 100, 5):
        index = int(p / 100.0 * len(ano_sl))
        print("{}%\t{:.2e}".format(p, ano_sl[index]))
    print("100%\t{:.2e}".format(ano_sl[-1]))
    print("异常样本召回率{:.2f}%".format(100 * np.count_nonzero(np.asarray(ano_sl) > gmm.threshold) / len(ano_sl)))
    # jiangnan: anomaly recall 81.26%

    # Subsample for plotting so the scatter stays readable/fast.
    if len(sl) > 10000:
        sl = np.random.choice(sl, 10000)
        sl = sorted(sl)
    if len(ano_sl) > 10000:
        ano_sl = np.random.choice(ano_sl, 10000)
        ano_sl = sorted(ano_sl)
    plt.figure(figsize=(8, 8))
    ax = plt.subplot()
    plt.suptitle("蓝色:正常样本,红色:异常样本")
    ax.axhline(gmm.threshold, color='black', linestyle='dashed')
    ax.scatter(range(len(sl) // 100 * 99), sl[:len(sl) // 100 * 99], color='b', s=1)  # normal samples in blue (top 1% of energies clipped)
    ax.scatter(range(len(ano_sl) // 100 * 90), ano_sl[:len(ano_sl) // 100 * 90], color='r', s=1)  # anomalous samples in red (top 10% clipped)
    plt.savefig(check_file, format="png")

    plt.show()


if __name__ == "__main__":
    # absl.app parses the flags defined above before invoking main().
    app.run(main)

# python .\anomaly_detection\dagmm.py --corpus cwru --data_dim 1d --data_trans original
# python .\anomaly_detection\dagmm.py --corpus jiangnan --data_dim 1d --data_trans original
