# -*- encoding: utf-8 -*-
'''
@File    :   dagmm.py
@Time    :   2022/3/23 16:55
@Author  :   ZhangChaoYang
@Desc    :   Train a DAGMM anomaly detector on selected work conditions and
             evaluate its energy-based threshold on held-out normal samples,
             normal samples from different work conditions, and anomalies.
'''

import os
import sys

sys.path.insert(0, os.getcwd())
import numpy as np
import tensorflow_addons as tfa
from models import losses
from util.err_analyze import fit_err_percentage
from anomaly_detection.gmm import GMM
from util.work_flow import preprocess, gen_outfile, save_model, load_model, train_test_split, preprocess1
import pickle
from anomaly_detection.dagmm import DAGMM

from absl import flags
from absl import app

# absl flag registry; additional flags (corpus, data_dim, data_trans) are
# registered by imported project modules — presumably util.work_flow; verify.
FLAGS = flags.FLAGS

# Comma-separated list of work conditions used to train the model.
flags.DEFINE_string('wc_4_train', "", '用于训练模型的工况列表（用逗号分隔）')
# Comma-separated list of work conditions used to validate/test the model.
flags.DEFINE_string('wc_4_valid', "", '用于测试模型的工况列表（用逗号分隔）')


def main(argv):
    """Train a DAGMM on normal samples from the training work conditions,
    fit a GMM on the latent codes, pick an energy threshold at the 90th
    percentile of normal-sample energies, then report recall on
    (a) the training-condition normal samples, (b) normal samples from the
    *validation* work conditions, and (c) anomalous samples, finally saving
    a scatter chart of the three energy distributions to ``check_file``.

    Args:
        argv: positional command-line arguments from absl (unused).
    """
    # NOTE(review): corpus/data_dim/data_trans flags are not defined in this
    # file — assumed registered by an imported project module; confirm.
    corpus = FLAGS.corpus
    data_dim = FLAGS.data_dim
    data_trans = FLAGS.data_trans
    learning_rate = 1e-4
    batch_size = 256
    epochs = 100  # Sample counts differ per corpus: when not splitting by work condition, paderborn uses 5 epochs, cwru and jiangnan use 30. When fine-tuning from the general model, set epochs very small.
    model_name = "dagmm_vary_wc"

    # Resolve every input/output path for this (dim, corpus, transform, model,
    # work-condition) combination in one call.
    normal_data_files, anomaly_data_file, train_history_file, check_file, model_file, scaler_file = gen_outfile(
        data_dim,
        corpus,
        data_trans,
        model_name,
        work_loads=FLAGS.wc_4_train.split(","),
    )
    print(check_file)
    print(train_history_file)

    # Load normal (X) and anomalous (ano_X) samples, then split normals for training.
    X, ano_X = preprocess(normal_data_files, anomaly_data_file, data_dim)
    X_train, X_test = train_test_split(X)

    n_component = 2  # number of GMM mixture components
    gmm = GMM()
    optimizer = tfa.optimizers.AdamW(learning_rate=learning_rate, weight_decay=1E-4)
    model = DAGMM(input_shape=(X.shape[-2], X.shape[-1]), hidden_dims=[128, 16],
                  estimater_dims=[3 * n_component, n_component],
                  # lambda2=1E-13  # when fine-tuning from the general model, set lambda2 very small so diag_loss does not perturb the parameters
                  )

    # Optional: continue training from the general rolling-bearing base model.
    # general_model_file = os.path.join("data", "model", data_dim, "bearing", data_trans, "bearing_dagmm")
    # saved_model = load_model(general_model_file)
    # model.compresser = saved_model.compresser
    # model.estimater = saved_model.estimater
    # gmm.load(os.path.join(general_model_file, "gmm"))
    # with open(os.path.join(general_model_file, "opt"), "rb") as fp:
    #     optimizer.from_config(pickle.load(fp))

    model.train(X_train, X_test, batch_size=batch_size, epochs=epochs, optimizer=optimizer,
                chart_file=train_history_file)
    save_model(model, model_file)
    # Persist the optimizer config next to the model so fine-tuning can resume
    # later (see the commented-out restore block above).
    with open(os.path.join(model_file, "opt"), "wb") as fout:
        pickle.dump(optimizer.get_config(), fout, protocol=pickle.HIGHEST_PROTOCOL)
    # Inspect the exported SavedModel with:
    # saved_model_cli show --dir $model_file --tag_set serve --signature_def serving_default
    model = load_model(model_file)  # reload to validate the serialized model

    print("正常样本")
    x_hat, gamma, normal_z = model(X)
    fit_err_percentage(X, x_hat, losses.square_loss)  # encoder reconstruction error on normal samples
    gmm.fit(normal_z, gamma)
    # DAGMM recommends thresholding on the energy loss to separate normal
    # from anomalous samples.
    energy = gmm.energy(normal_z)
    print("energy loss")
    sl = sorted(energy.numpy().tolist())
    # Print the energy distribution at 5%-quantile steps.
    for p in range(0, 100, 5):
        index = int(p / 100.0 * len(sl))
        print("{}%\t{:.2e}".format(p, sl[index]))
    print("100%\t{:.2e}".format(sl[-1]))
    gmm.threshold = sl[int(0.9 * len(sl))]  # use the 90th percentile of normal-sample energy as the decision threshold
    print("threshold", gmm.threshold)
    print("正常样本召回率{:.2f}%".format(100 * np.count_nonzero(np.asarray(sl) <= gmm.threshold) / len(sl)))
    gmm.save(os.path.join(model_file, "gmm"))
    gmm.load(os.path.join(model_file, "gmm"))  # round-trip load to validate the saved GMM

    # Deferred import: matplotlib is only needed for the final chart.
    import matplotlib.pyplot as plt

    print("不同工况的正常样本")
    # Score normal samples from work conditions the model was NOT trained on.
    X_valid = preprocess1(
        [os.path.join("corpus", data_dim, corpus, data_trans, wc, "normal.npy") for wc in
         FLAGS.wc_4_valid.split(",")], data_dim)
    valid_sl = []
    if X_valid is not None:
        x_hat, gamma, normal_z = model(X_valid)
        fit_err_percentage(X_valid, x_hat, losses.square_loss)  # encoder reconstruction error on these normal samples
        # Reuse the energy threshold fitted on the training work conditions.
        energy = gmm.energy(normal_z)
        print("energy loss")
        valid_sl = sorted(energy.numpy().tolist())
        for p in range(0, 100, 5):
            index = int(p / 100.0 * len(valid_sl))
            print("{}%\t{:.2e}".format(p, valid_sl[index]))
        print("100%\t{:.2e}".format(valid_sl[-1]))
        print(
            "不同工况下正样本召回率{:.2f}%".format(100 * np.count_nonzero(np.asarray(valid_sl) <= gmm.threshold) / len(valid_sl)))

    print("异常样本")
    ano_x_hat, gamma, ano_z = model(ano_X)
    fit_err_percentage(ano_X, ano_x_hat, losses.square_loss)  # encoder reconstruction error on anomalous samples

    # Anomalies should land ABOVE the energy threshold.
    energy = gmm.energy(ano_z)
    print("energy loss")
    ano_sl = sorted(energy.numpy().tolist())
    for p in range(0, 100, 5):
        index = int(p / 100.0 * len(ano_sl))
        print("{}%\t{:.2e}".format(p, ano_sl[index]))
    print("100%\t{:.2e}".format(ano_sl[-1]))
    print("异常样本召回率{:.2f}%".format(100 * np.count_nonzero(np.asarray(ano_sl) > gmm.threshold) / len(ano_sl)))

    # Subsample each energy series to at most 10000 points for plotting.
    # NOTE(review): np.random.choice samples WITH replacement by default, so
    # the subsample may contain duplicates — confirm replace=False was intended.
    if len(sl) > 10000:
        sl = np.random.choice(sl, 10000)
        sl = sorted(sl)
    if len(valid_sl) > 10000:
        valid_sl = np.random.choice(valid_sl, 10000)
        valid_sl = sorted(valid_sl)
    if len(ano_sl) > 10000:
        ano_sl = np.random.choice(ano_sl, 10000)
        ano_sl = sorted(ano_sl)
    plt.figure(figsize=(8, 8))
    ax = plt.subplot()
    plt.suptitle("蓝色:正常样本,绿色:不同工况的正常样本,红色:异常样本")
    ax.axhline(gmm.threshold, color='black', linestyle='dashed')
    # Drop the top 1% (normals) / 10% (anomalies) of energies so extreme
    # outliers do not flatten the chart.
    ax.scatter(range(len(sl) // 100 * 99), sl[:len(sl) // 100 * 99], color='b', s=1)  # normal samples in blue
    if len(valid_sl) > 0:
        ax.scatter(range(len(valid_sl) // 100 * 99), valid_sl[:len(valid_sl) // 100 * 99], color='g',
                   s=1)  # normal samples from different work conditions in green
    ax.scatter(range(len(ano_sl) // 100 * 90), ano_sl[:len(ano_sl) // 100 * 90], color='r', s=1)  # anomalous samples in red
    plt.savefig(check_file, format="png")

    plt.show()


if __name__ == "__main__":
    # absl entry point: parses all registered flags, then calls main(argv).
    app.run(main)

# python .\anomaly_detection\vary_work_condition\dagmm.py --corpus jiangnan --data_dim 1d --data_trans original --wc_4_train 600,1000 --wc_4_valid 800
