# -*- encoding: utf-8 -*-
'''
@File    :   vae.py
@Time    :   2021/11/23 10:27
@Author  :   ZhangChaoYang
@Desc    :   变分自动编码器 Variational Auto-Encoder (VAE)
'''

import os
import sys

sys.path.insert(0, os.getcwd())
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from models.encoders import FCEncoder, ConvEncoder
from models.decoders import FCDecoder, ConvDecoder
from models import losses
from util.view import draw_train_history, draw_fit_error_1d, draw_fit_error_2d, draw_fit_error
from util.err_analyze import fit_err_percentage
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import History
from util.work_flow import preprocess, gen_outfile, save_model, load_model

from absl import flags
from absl import app

FLAGS = flags.FLAGS

# Command-line flags selecting the network architecture and the data pipeline.
# NOTE(review): the help text of `data_dim` and `data_trans` reuses the word
# "数据集" (dataset); they actually select data dimensionality / transform.
flags.DEFINE_string('kind', "fc", 'AE模型使用哪种网络，可选fc或conv')
flags.DEFINE_string('corpus', "jiangnan", '数据集，可选cwru、jiangnan、paderborn')
flags.DEFINE_string('data_dim', "1d", '数据集，可选1d、2d')
flags.DEFINE_string('data_trans', "original",
                    '数据集，data_dim=1d时data_trans可选original、fft、stat，data_dim=2d时data_trans可选sfft、cwt')


def reparameterize(mu, log_var):
    """Reparameterization trick: sample z ~ N(mu, sigma^2) as z = mu + sigma * eps.

    Drawing eps from a standard normal and transforming it keeps the path from
    (mu, log_var) to z differentiable, so gradients can flow through the
    stochastic node during training.

    Args:
        mu: posterior mean tensor, shape (batch, latent_dim).
        log_var: log-variance tensor with the same shape as ``mu``.

    Returns:
        Latent sample z with the same shape as ``mu``.
    """
    std = tf.exp(0.5 * log_var)  # sigma = exp(log_var / 2)
    # Bug fix: draw an independent epsilon for every sample in the batch.
    # The original used shape [1, latent_dim], which broadcast one shared
    # noise vector across the whole batch.
    epsilon = tf.random.normal(shape=tf.shape(log_var))
    return mu + std * epsilon


class VAE(Model):
    """Variational auto-encoder with fully-connected or convolutional backbones.

    The encoder output is projected to the mean and log-variance of a diagonal
    Gaussian posterior; a latent sample obtained via the reparameterization
    trick is fed to the decoder to reconstruct the input.
    """

    def __init__(self, kind, input_shape=None, hidden_dims=None, kernel_size=[], strides=[], filters=[], kl_coef=1.0):
        """Build the encoder/decoder pair and the posterior projection heads.

        Args:
            kind: "fc" for fully-connected, "conv" for convolutional.
            input_shape: shape of one input sample; (height, width) for "conv".
            hidden_dims: encoder layer widths; hidden_dims[-1] is the latent dim.
            kernel_size, strides, filters: conv architecture lists ("conv" only).
            kl_coef: weight of the KL-divergence term in the total loss.

        Raises:
            Exception: if ``kind`` is neither "fc" nor "conv".
        """
        super(VAE, self).__init__()
        self.kl_coef = kl_coef
        if kind == "fc":
            self.encoder = FCEncoder(hidden_dims)
            self.decoder = FCDecoder(input_shape, hidden_dims)
        elif kind == "conv":
            height, width = input_shape
            self.encoder = ConvEncoder(kernel_size, strides, filters)
            self.decoder = ConvDecoder(height, width, kernel_size, strides, filters)
        else:
            raise Exception("unsupported AE type: {}".format(kind))
        # No range constraint on the mean. The log-variance is likewise left
        # unconstrained: the variance must be positive, but its log spans all
        # of the real line.
        self.mu_dense = Dense(units=hidden_dims[-1])
        self.log_var_dense = Dense(units=hidden_dims[-1])

    def call(self, inputs, training=None, mask=None):
        """Forward pass.

        Returns:
            Tuple (x_hat, mu, log_var): the reconstruction plus the posterior
            mean and log-variance used to sample the latent code.
        """
        hidden = self.encoder(inputs)
        mu = self.mu_dense(hidden)
        log_var = self.log_var_dense(hidden)
        z = reparameterize(mu, log_var)
        x_hat = self.decoder(z)
        return x_hat, mu, log_var

    def get_loss(self, x, x_hat, mu, log_var):
        """Compute total loss = reconstruction MSE + kl_coef * KL divergence.

        Returns:
            Tuple (loss, reconstruct_loss, kl_div) of scalar tensors.
        """
        # Per-sample reconstruction error, then averaged over the batch.
        reconstruct_loss = tf.reduce_mean(tf.reduce_mean((x_hat - x) ** 2, axis=-1), axis=-1)
        reconstruct_loss = tf.reduce_mean(reconstruct_loss, axis=0)
        # Closed-form KL(N(mu, sigma^2) || N(0, 1)) per latent dimension,
        # averaged over all samples and dimensions.
        kl_div = -0.5 * (log_var + 1 - mu ** 2 - tf.exp(log_var))
        kl_div = tf.reduce_mean(kl_div)
        loss = reconstruct_loss + self.kl_coef * kl_div
        return loss, reconstruct_loss, kl_div

    def train(self, X, batch_size, epochs, learning_rate, chart_file=""):
        """Train with mini-batch gradient descent and loss-plateau early stopping.

        Splits X 80/20 into train/validation, runs at most ``epochs`` passes,
        and stops early once the epoch loss changes by less than ``min_delta``
        for ``patience`` consecutive epochs. Saves a train/val loss chart.
        """
        X_train, X_test = train_test_split(X, test_size=0.2, random_state=0)
        optimizer = Adam(learning_rate=learning_rate)
        n = X_train.shape[0]
        min_delta = 1e-6  # loss changes smaller than this count as "no progress"
        patience = 30     # stop after this many consecutive no-progress epochs
        prev_loss = 0
        successive = 0
        train_losses = []
        test_losses = []
        for epoch in range(epochs):
            np.random.shuffle(X_train)  # reshuffle the training samples every epoch
            # Renamed from `losses` so the local list does not shadow the
            # imported `models.losses` module.
            batch_losses = []
            reg_losses = []
            kl_divs = []
            for begin in range(0, n, batch_size):
                end = min(begin + batch_size, n)
                x = X_train[begin:end]
                with tf.GradientTape() as tape:
                    x_hat, mu, log_var = self(x)  # forward pass
                    loss, reg_loss, kl_div = self.get_loss(x, x_hat, mu, log_var)
                batch_losses.append(loss)
                reg_losses.append(reg_loss)
                kl_divs.append(kl_div)
                # Compute and apply gradients outside the tape context so the
                # backward pass itself is not recorded on the tape.
                grads = tape.gradient(loss, self.trainable_variables)
                optimizer.apply_gradients(zip(grads, self.trainable_variables))
            curr_loss = np.mean(batch_losses)
            x_hat, mu, log_var = self(X_test)  # validation forward pass
            test_loss, _, _ = self.get_loss(X_test, x_hat, mu, log_var)
            test_loss = float(test_loss)  # store a plain float, not a tensor
            train_losses.append(curr_loss)
            test_losses.append(test_loss)
            print("epoch {} loss {} 拟合误差 {} KL散度 {} 验证集上的拟合误差 {}".format(
                epoch, curr_loss, np.mean(reg_losses), np.mean(kl_divs), test_loss), flush=True)

            # Early stopping on a training-loss plateau (skipped on epoch 0,
            # where prev_loss is still the 0 sentinel).
            if prev_loss != 0:
                if math.fabs(prev_loss - curr_loss) < min_delta:
                    successive += 1
                    if successive >= patience:
                        break
                else:
                    successive = 0
            prev_loss = curr_loss

        history = History()
        history.history = {"loss": train_losses, "val_loss": test_losses}
        draw_train_history(history, chart_file=chart_file)


def main(argv):
    """Entry point: train a VAE on the selected corpus, persist it, and plot
    reconstruction errors for normal vs. anomalous samples."""
    corpus, data_dim = FLAGS.corpus, FLAGS.data_dim
    data_trans, kind = FLAGS.data_trans, FLAGS.kind

    # Training hyper-parameters.
    learning_rate = 1e-4
    batch_size = 128
    epochs = 100
    model_name = "vae"

    # Resolve every input/output file location for this configuration.
    (normal_data_files, anomaly_data_file, train_history_file,
     check_file, model_file, scaler_file) = gen_outfile(data_dim,
                                                        corpus,
                                                        data_trans,
                                                        model_name)
    X, ano_X = preprocess(normal_data_files, anomaly_data_file, data_dim)

    # Build, train, save, then reload the model.
    model = VAE(kind=kind, input_shape=(X.shape[-2], X.shape[-1]), hidden_dims=[128, 32])
    model.train(X, batch_size=batch_size, epochs=epochs, learning_rate=learning_rate, chart_file=train_history_file)
    save_model(model, model_file)
    model = load_model(model_file)

    def reconstruct(model, x):
        # Deterministic reconstruction: decode from the posterior mean
        # rather than a random latent sample.
        code = model.mu_dense(model.encoder(x))
        return model.decoder(code)

    print("正常样本")
    e1 = fit_err_percentage(X, reconstruct(model, X), losses.square_loss)
    print("故障样本")
    e2 = fit_err_percentage(ano_X, reconstruct(model, ano_X), losses.square_loss)
    draw_fit_error(e1, e2, model_file)

    # Plot some normal and anomalous samples against their reconstructions.
    if data_dim == "1d":
        draw_fit_error_1d(X, ano_X, reconstruct(model, X), reconstruct(model, ano_X), chart_file=check_file)
    else:
        draw_fit_error_2d(X, ano_X, reconstruct(model, X), reconstruct(model, ano_X), chart_file=check_file)


if __name__ == '__main__':
    # absl parses the flags defined above, then invokes main(argv).
    app.run(main)

# python .\anomaly_detection\vae.py --kind fc --corpus cwru --data_dim 1d --data_trans original
# python .\anomaly_detection\vae.py --kind fc --corpus jiangnan --data_dim 1d --data_trans original
