# -*- encoding: utf-8 -*-
'''
@File    :   ganomaly.py
@Time    :   2021/11/26 11:29
@Author  :   ZhangChaoYang
@Desc    :   GANomaly https://arxiv.org/pdf/1805.06725v2.pdf
'''

import os
import sys

sys.path.insert(0, os.getcwd())
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.losses import MeanSquaredError, mean_squared_error
from tensorflow.keras.optimizers import Adam
from models.encoders import FCEncoder, ConvEncoder
from models.decoders import FCDecoder, ConvDecoder
from util.view import draw_train_history, draw_fit_error_1d, draw_fit_error_2d, draw_fit_error
from util.err_analyze import fit_err_percentage
from tensorflow.keras.callbacks import History
from util.work_flow import preprocess, gen_outfile, save_model, load_model, train_test_split
from absl import flags
from absl import app

FLAGS = flags.FLAGS

# Command-line flags (help texts kept in Chinese, matching the file's convention).
# Fix: the help strings for data_dim/data_trans were copy-pasted from 'corpus'
# and wrongly said "数据集" (dataset); they describe dimensionality / transform.
flags.DEFINE_string('kind', "fc", 'AE模型使用哪种网络，可选fc或conv')
flags.DEFINE_string('corpus', "jiangnan", '数据集，可选cwru、jiangnan、paderborn')
flags.DEFINE_string('data_dim', "1d", '数据维度，可选1d、2d')
flags.DEFINE_string('data_trans', "original",
                    '数据变换，data_dim=1d时data_trans可选original、fft、stat，data_dim=2d时data_trans可选sfft、cwt')


class GANomaly(Model):
    """GANomaly anomaly detector (https://arxiv.org/pdf/1805.06725v2.pdf).

    Encoder1 -> Decoder -> Encoder2: the input is encoded to a latent code
    z1, reconstructed to x_hat, and x_hat is re-encoded to z2.  The model is
    trained on normal samples only; at inference time a large distance
    between z1 and z2 signals an anomaly.

    NOTE(review): unlike the original paper, this implementation has no
    discriminator / adversarial loss — only the reconstruction and
    re-encoding terms are optimized.
    """

    def __init__(self, kind, input_shape=None, hidden_dims=None,
                 kernel_size=None, strides=None, filters=None):
        """
        :param kind: "fc" (fully connected) or "conv" (convolutional) sub-nets.
        :param input_shape: decoder output shape; for kind="conv" it is
            unpacked as (height, width).
        :param hidden_dims: layer sizes for the FC encoder/decoder.
        :param kernel_size: per-layer kernel sizes for the conv variant.
        :param strides: per-layer strides for the conv variant.
        :param filters: per-layer filter counts for the conv variant.
        """
        super(GANomaly, self).__init__()
        # Fix: the original used mutable default arguments ([]); accept None
        # and normalize to fresh lists instead (backward compatible).
        kernel_size = [] if kernel_size is None else kernel_size
        strides = [] if strides is None else strides
        filters = [] if filters is None else filters
        self.alpha = 1E-3  # weight of the reconstruction term in the joint loss
        if kind == "fc":
            self.encoder1 = FCEncoder(hidden_dims)
            self.decoder = FCDecoder(input_shape, hidden_dims)
            self.encoder2 = FCEncoder(hidden_dims)
        elif kind == "conv":
            height, width = input_shape
            self.encoder1 = ConvEncoder(kernel_size, strides, filters)
            self.decoder = ConvDecoder(height, width, kernel_size, strides, filters)
            self.encoder2 = ConvEncoder(kernel_size, strides, filters)

    def call(self, inputs, training=None, mask=None):
        """Return (z1, x_hat, z2): first code, reconstruction, re-encoded code."""
        z1 = self.encoder1(inputs)
        x_hat = self.decoder(z1)
        z2 = self.encoder2(x_hat)
        return z1, x_hat, z2

    def train(self, X, batch_size, epochs, learning_rate, chart_file=""):
        """Custom training loop with manual early stopping on the train loss.

        The reconstruction error and the re-encoding error are combined into
        one weighted loss and optimized jointly (unlike a GAN, where the
        generator and discriminator are optimized separately).

        :param X: normal samples; split internally into train/test sets.
        :param batch_size: mini-batch size.
        :param epochs: maximum number of epochs.
        :param learning_rate: Adam learning rate.
        :param chart_file: where to save the loss-history chart.
        """
        X_train, X_test = train_test_split(X)
        optimizer = Adam(learning_rate=learning_rate)
        n = X_train.shape[0]
        min_delta = 1e-6   # early stopping: min loss change counted as progress
        patience = 30      # stop after this many stagnant epochs in a row
        prev_loss = 0
        successive = 0
        train_losses = []
        test_losses = []
        for epoch in range(epochs):
            np.random.shuffle(X_train)  # reshuffle the training set every epoch
            loss_list = []
            for begin in range(0, n, batch_size):
                end = min(begin + batch_size, n)  # clamp the last partial batch
                x = X_train[begin:end]
                with tf.GradientTape() as tape:
                    z1, x_hat, z2 = self(x)
                    recon_loss = tf.nn.l2_loss(x_hat - x)
                    encode_loss = tf.nn.l2_loss(z2 - z1)
                    # Weighted sum of reconstruction and encoding errors,
                    # normalized by the batch size.
                    loss = (self.alpha * recon_loss + encode_loss) / x.shape[0]
                # Fix: gradient computation and the optimizer step were inside
                # the tape context, so the tape also recorded the backward
                # pass (wasted memory/compute); they belong outside the `with`.
                loss_list.append(loss)
                grads = tape.gradient(loss, self.trainable_variables)
                optimizer.apply_gradients(zip(grads, self.trainable_variables))
            curr_loss = np.mean(loss_list)

            # Validation pass on the held-out split.
            z1, x_hat, z2 = self(X_test)
            recon_loss = tf.nn.l2_loss(X_test - x_hat)
            encode_loss = tf.nn.l2_loss(z1 - z2)
            test_loss = (self.alpha * recon_loss + encode_loss) / X_test.shape[0]

            train_losses.append(curr_loss)
            test_losses.append(test_loss)
            print("epoch {} loss {} val_loss {} val_construct_loss {} val_encode_loss {}".format(
                epoch, curr_loss, test_loss, recon_loss, encode_loss), flush=True)

            # Manual early stopping: break after `patience` consecutive epochs
            # whose train-loss change stays below `min_delta`.
            if prev_loss != 0:
                if math.fabs(prev_loss - curr_loss) < min_delta:
                    successive += 1
                    if successive >= patience:
                        break
                else:
                    successive = 0
            prev_loss = curr_loss

        history = History()
        history.history = {"loss": train_losses, "val_loss": test_losses}
        draw_train_history(history, chart_file=chart_file)


def main(argv):
    """Script entry point: build/load a GANomaly model and visualize its errors."""
    kind = FLAGS.kind
    corpus = FLAGS.corpus
    data_dim = FLAGS.data_dim
    data_trans = FLAGS.data_trans
    model_name = "ganomaly"
    # Hyper-parameters for the (currently disabled) training call below.
    learning_rate = 1e-3
    batch_size = 128
    epochs = 100

    (normal_data_files, anomaly_data_file, train_history_file, check_file,
     model_file, scaler_file) = gen_outfile(data_dim, corpus, data_trans, model_name)
    X, ano_X = preprocess(normal_data_files, anomaly_data_file, data_dim)

    model = GANomaly(kind=kind, input_shape=(X.shape[-2], X.shape[-1]),
                     hidden_dims=[128, 32])
    # Training/saving is disabled; a previously saved model is loaded instead.
    # model.train(X, batch_size=batch_size, epochs=epochs,
    #             learning_rate=learning_rate, chart_file=train_history_file)
    # save_model(model, model_file)
    model = load_model(model_file)

    # Inspect the re-encoding error (distance between the two latent codes).
    def err_func(a, b):
        return tf.reduce_mean(tf.square(a - b), axis=-1)

    print("正常样本")
    z1, x_hat, z2 = model(X)
    normal_err = fit_err_percentage(z1, z2, err_func)
    print("故障样本")
    ano_z1, ano_x_hat, ano_z2 = model(ano_X)
    anomaly_err = fit_err_percentage(ano_z1, ano_z2, err_func)
    draw_fit_error(normal_err, anomaly_err, model_file)

    # Sample some normal and anomalous signals to eyeball reconstruction quality.
    if data_dim == "1d":
        draw_fit_error_1d(X, x_hat, ano_X, ano_x_hat, chart_file=check_file)
    else:
        draw_fit_error_2d(X, x_hat, ano_X, ano_x_hat, chart_file=check_file)


# Delegate to absl, which parses the flags defined at the top of the file
# before invoking main().
if __name__ == '__main__':
    app.run(main)

# Usage examples:
# python .\anomaly_detection\ganomaly.py  --kind fc --corpus cwru --data_dim 1d --data_trans original
# python .\anomaly_detection\ganomaly.py  --kind fc --corpus jiangnan --data_dim 1d --data_trans original