# -*- encoding: utf-8 -*-
'''
@File    :   transfer_gmm.py
@Time    :   2021/12/7 17:29
@Author  :   ZhangChaoYang
@Desc    :   在transfer的基础上，基于target_invariant_feature再接一个DAGMM的Estimation网络
'''

import os
import sys

import matplotlib.pyplot as plt

sys.path.insert(0, os.getcwd())
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import Model, Sequential
from tensorflow.keras.losses import cosine_similarity, MeanSquaredError
from tensorflow.keras.layers import Dense
import numpy as np
from util.work_flow import preprocess1, preprocess, save_model, load_model, train_test_split_limit_memory
from util.view import draw_train_history
from tensorflow.keras.callbacks import History
from models.encoders import FCEncoder
from models.decoders import FCDecoder
import math
from anomaly_detection.gmm import GMM
from anomaly_detection.dagmm import Estimation,FLAGS
from absl import app
from absl import flags

# Command-line flags selecting which working conditions (comma-separated lists)
# are used for training vs. validation; both are consumed in main() below.
flags.DEFINE_string('wc_4_train', "", '用于训练模型的工况列表（用逗号分隔）')
flags.DEFINE_string('wc_4_valid', "", '用于测试模型的工况列表（用逗号分隔）')


class TransferGMM(Model):
    """Domain-adaptation autoencoder with a DAGMM-style estimation head.

    On top of the transfer model (shared/invariant encoder, private/difference
    encoder, decoder, source-domain classifier and domain discriminator), the
    target-domain invariant features are fed to a DAGMM Estimation network and
    the resulting GMM energy is added to the training objective, enabling
    anomaly detection on the target domain.
    """

    def __init__(self, input_shape, hidden_dims, estimater_dims, alpha=1.0, beta=1.0, lmbd=1.0, phi=1.0, lambda1=0.1,
                 lambda2=0.005):
        # input_shape: shape of one sample, given to the decoder for reconstruction.
        # hidden_dims: layer widths shared by both FC encoders (decoder mirrors them).
        # estimater_dims: layer widths of the Estimation net; the last entry is
        #   the number of GMM components K.
        # alpha/beta/lmbd/phi/lambda1/lambda2: weights of the individual loss
        #   terms combined in get_loss().
        super(TransferGMM, self).__init__()
        self.differnce_encoder = FCEncoder(hidden_dims)  # private (domain-specific) encoder
        self.invariant_encoder = FCEncoder(hidden_dims)  # shared (domain-invariant) encoder
        self.decoder = FCDecoder(input_shape, hidden_dims)
        # NOTE(review): both heads below end in a sigmoid, yet get_loss() feeds
        # their outputs to tf.nn.sigmoid_cross_entropy_with_logits, which applies
        # sigmoid again — confirm the double sigmoid is intended.
        self.classifier = Sequential([Dense(units=8, activation=tf.nn.tanh),
                                      Dense(units=1, use_bias=True, activation=tf.nn.sigmoid)])
        self.domain_discriminator = Sequential([Dense(units=8, activation=tf.nn.tanh),
                                                Dense(units=1, use_bias=True, activation=tf.nn.sigmoid)])
        self.alpha = alpha
        self.beta = beta
        self.lmbd = lmbd
        self.phi = phi
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.estimater = Estimation(estimater_dims)
        self.K = estimater_dims[-1]  # width of the estimater's last layer = number of GMM components

    def call(self, inputs, training=None, mask=None):
        """Forward pass over a pair ``inputs = [Xs, Xt]`` of source/target batches.

        Returns a 10-tuple: both reconstructions, the source label prediction,
        invariant and difference features for both domains, both domain
        discriminator outputs, and the GMM membership matrix gamma computed
        from the target invariant features.
        """
        Xs = inputs[0]  # source domain sample
        Xt = inputs[1]  # target domain sample
        # Domain-invariant features (shared encoder).
        source_invariant_feature = self.invariant_encoder(Xs)
        target_invariant_feature = self.invariant_encoder(Xt)
        # Domain-specific (difference) features (private encoder).
        source_differnce_feature = self.differnce_encoder(Xs)
        target_differnce_feature = self.differnce_encoder(Xt)
        source_label_hat = self.classifier(source_invariant_feature)
        # Reconstruction decodes the sum of difference and invariant features.
        source_x_hat = self.decoder(source_differnce_feature + source_invariant_feature)
        target_x_hat = self.decoder(target_differnce_feature + target_invariant_feature)
        # Soft GMM component memberships of the target invariant features (DAGMM).
        gamma = self.estimater(target_invariant_feature)
        source_domain_hat = self.domain_discriminator(source_invariant_feature)
        target_domain_hat = self.domain_discriminator(target_invariant_feature)
        return (source_x_hat, target_x_hat, source_label_hat, source_invariant_feature, source_differnce_feature,
                target_invariant_feature, target_differnce_feature, source_domain_hat, target_domain_hat, gamma)

    def get_loss(self, source_x_hat, target_x_hat, source_label_hat, source_invariant_feature, source_differnce_feature,
                 target_invariant_feature, target_differnce_feature, source_domain_hat, target_domain_hat, inputs,
                 source_true_label, gamma):
        """Combine the seven weighted loss terms.

        Returns ``(total, classifier, orthogonal, discriminate, reconstruct,
        energy, diag)`` losses; the weights come from the constructor.
        """
        Xs = inputs[0]  # source domain sample
        Xt = inputs[1]  # target domain sample
        # Source and target batches must be the same size (features are paired
        # element-wise in the contrastive loss below); bail out hard otherwise.
        if Xs.shape[0] != Xt.shape[0]:
            print(Xs.shape[0], Xt.shape[0])
            sys.exit(1)

        # Fit a GMM to the target invariant features with the estimation net's
        # soft memberships, then penalize high sample energy and near-singular
        # covariance diagonals (the DAGMM objective terms).
        gmm = GMM()
        gmm.fit(target_invariant_feature, gamma)
        energy = self.lambda1 * tf.reduce_mean(gmm.energy(target_invariant_feature))
        diag_loss = self.lambda2 * gmm.cov_diag_loss()

        # Source-vs-target domain classification error — the paper's
        # "Similarity Loss". (Author-noted AUC=0.43.)
        domain_labels = tf.concat([tf.zeros((Xs.shape[0], 1)), tf.ones((Xt.shape[0], 1))], axis=0)  # source=0, target=1
        domain_hat = tf.concat([source_domain_hat, target_domain_hat], axis=0)
        # NOTE(review): domain_hat is already sigmoid-activated (see __init__),
        # but sigmoid_cross_entropy_with_logits expects raw logits — confirm.
        discriminate_loss = self.beta * tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(tf.squeeze(domain_labels), tf.squeeze(domain_hat)), axis=0)
        # Contrastive loss: source/target pairs of the same class should be
        # similar (low cosine value), pairs of different classes dissimilar.
        # (Author-noted AUC=0.36.)
        sign = tf.map_fn(lambda x: 1 if x > 0 else -1, source_true_label)  # target samples are all positive, so sign is +1 for positive source samples and -1 for negative ones
        # Note: Keras cosine_similarity is really a distance — it returns -1
        # for identical directions and 0 for orthogonal vectors.
        contrastive_loss = self.phi * tf.reduce_mean(
            sign * cosine_similarity(source_invariant_feature, target_invariant_feature), axis=0)
        # Source-domain classification error — the paper's Classification Loss.
        classifier_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(tf.squeeze(source_true_label), tf.squeeze(source_label_hat)),
            axis=0)
        # Enforce orthogonality between invariant and difference features
        # (angle -> 90 degrees, cosine -> 0) — the paper's Difference Loss.
        orthogonal_loss = self.alpha * tf.reduce_mean(
            tf.pow(cosine_similarity(source_invariant_feature, source_differnce_feature), 2) + tf.pow(
                cosine_similarity(target_invariant_feature, target_differnce_feature), 2), axis=0)
        # Reconstruction error on both domains — the paper's Reconstruction Loss.
        reconstruct_loss = self.lmbd * (MeanSquaredError()(Xs, source_x_hat) + MeanSquaredError()(Xt, target_x_hat))
        loss = classifier_loss + orthogonal_loss + discriminate_loss + reconstruct_loss + contrastive_loss + energy + diag_loss
        return (loss, classifier_loss, orthogonal_loss, discriminate_loss, reconstruct_loss, energy, diag_loss)

    def extract_batch(self, b, e, x):
        """Slice rows [b, e) out of ``x`` treated as a cyclic buffer.

        Indices wrap modulo len(x), so target-domain batches can always match
        the source batch size even when the target set is smaller.
        """
        n = x.shape[0]
        begin = b % n
        end = e % n
        s1 = b // n
        s2 = e // n
        if s1 == s2:
            # Both endpoints fall within the same pass over x.
            return x[begin:end]
        else:
            # The range wraps: tail of x, zero or more full copies, then the head.
            lst = [x[begin:]]
            for i in range(s2 - s1 - 1):
                lst.append(x)
            lst.append(x[:end])
            return tf.concat(lst, axis=0)

    def train(self, X_source, X_target, batch_size, epochs, learning_rate, chart_file=""):
        """Mini-batch training loop with simple early stopping.

        Each X_source sample carries its label in the last feature row
        (stripped off per batch); X_target holds unlabeled target samples
        drawn cyclically via extract_batch. Training stops early after
        `patience` successive epochs whose train-loss change is below
        `min_delta`; finally train/validation loss curves are plotted to
        chart_file.
        """
        X_train, X_test = train_test_split_limit_memory(X_source)
        X_target_train, X_target_test = train_test_split_limit_memory(X_target)
        optimizer = tfa.optimizers.AdamW(learning_rate=learning_rate, weight_decay=1e-4)
        n = X_train.shape[0]
        min_delta = 1e-6
        patience = 20
        prev_loss = 0
        successive = 0
        train_losses = []
        test_losses = []
        for epoch in range(epochs):
            np.random.shuffle(X_train)  # reshuffle training samples every epoch
            np.random.shuffle(X_target_train)
            loss_list = []
            for begin in range(0, n, batch_size):
                end = begin + batch_size
                if end > n:
                    end = n
                # Last feature row of each source sample is its label.
                Xs = X_train[begin:end, :-1, :]
                true_label = tf.cast(X_train[begin:end, -1, :], tf.float32)
                Xt = self.extract_batch(begin, end, X_target_train)
                with tf.GradientTape() as tape:
                    inputs = [Xs, Xt]
                    source_x_hat, target_x_hat, source_label_hat, source_invariant_feature, source_differnce_feature, target_invariant_feature, target_differnce_feature, source_domain_hat, target_domain_hat, gamma = self(
                        inputs)
                    loss, classifier_loss, orthogonal_loss, transfer_loss, reconstruct_loss, energy, diag_loss = self.get_loss(
                        source_x_hat, target_x_hat, source_label_hat, source_invariant_feature,
                        source_differnce_feature, target_invariant_feature, target_differnce_feature, source_domain_hat,
                        target_domain_hat, inputs, true_label, gamma)
                    loss_list.append(loss)
                    grads = tape.gradient(loss, self.trainable_variables)
                    optimizer.apply_gradients(zip(grads, self.trainable_variables))
            curr_loss = np.mean(loss_list)

            # Validate on the held-out source split, paired with cyclically
            # drawn target test samples so batch sizes match.
            Xs = X_test[:, :-1, :]
            true_label = tf.cast(X_test[:, -1, :], tf.float32)
            begin = 0
            end = Xs.shape[0]
            Xt = self.extract_batch(begin, end, X_target_test)
            inputs = [Xs, Xt]
            source_x_hat, target_x_hat, source_label_hat, source_invariant_feature, source_differnce_feature, target_invariant_feature, target_differnce_feature, source_domain_hat, target_domain_hat, gamma = self(
                inputs)
            test_loss, classifier_loss, orthogonal_loss, transfer_loss, reconstruct_loss, energy, diag_loss = self.get_loss(
                source_x_hat, target_x_hat, source_label_hat, source_invariant_feature,
                source_differnce_feature, target_invariant_feature, target_differnce_feature, source_domain_hat,
                target_domain_hat, inputs, true_label, gamma)

            train_losses.append(curr_loss)
            test_losses.append(test_loss)
            print(
                "epoch {} loss {} val_loss {} val_classifier_loss {} val_orthogonal_loss {} val_transfer_loss {},val_reconstruct_loss {} val_energy_loss {} val_diag_loss {}".format(
                    epoch, curr_loss, test_loss, classifier_loss, orthogonal_loss, transfer_loss, reconstruct_loss,
                    energy, diag_loss),
                flush=True)

            # Early stopping: count successive epochs whose train-loss change is
            # below min_delta. (prev_loss == 0 also skips the very first epoch.)
            if prev_loss != 0:
                if math.fabs(prev_loss - curr_loss) < min_delta:
                    successive += 1
                    if successive >= patience:
                        break
                else:
                    successive = 0
            prev_loss = curr_loss

        # Persist the loss curves through a Keras History shim for the plotter.
        history = History()
        history.history = {"loss": train_losses, "val_loss": test_losses}
        draw_train_history(history, chart_file=chart_file)


def cal_energy(model, x, K):
    """Score samples ``x`` by the GMM energy of their invariant features.

    The model's shared encoder maps the samples into the invariant feature
    space, the estimation network produces soft component memberships, and a
    GMM fitted from those memberships assigns each sample an energy (higher =
    more anomalous).

    Note: ``K`` (the number of GMM components) is kept for signature
    compatibility but is not used here — the component count is implied by
    the estimation network's output width.
    """
    feats = model.invariant_encoder(x)
    mixture = GMM()
    mixture.fit(feats, model.estimater(feats))
    return mixture.energy(feats)


def _print_energy_percentiles(sl):
    """Print every 5th percentile (plus the maximum) of a sorted energy list."""
    for p in range(0, 100, 5):
        index = int(p / 100.0 * len(sl))
        print("{}%\t{:.2e}".format(p, sl[index]))
    print("100%\t{:.2e}".format(sl[-1]))


def _subsample_sorted(sl, limit=10000):
    """Cap an energy list at `limit` points (random draw, then re-sort).

    Used only to keep the scatter plot small. np.random.choice samples with
    replacement by default; kept as-is to preserve the original behavior.
    """
    if len(sl) > limit:
        sl = sorted(np.random.choice(sl, limit))
    return sl


def main(argv):
    """Train TransferGMM on source + target working conditions, pick an energy
    threshold from the training-condition normals, and report/plot energy
    distributions and recall for unseen-condition normal and anomaly samples.
    """
    corpus = FLAGS.corpus
    data_dim = FLAGS.data_dim
    data_trans = FLAGS.data_trans

    learning_rate = 1e-4
    batch_size = 256
    epochs = 30  # paderborn uses 10 epochs, cwru uses 30

    # Source domain: normal + anomaly data, labeled 1 (normal) / 0 (anomaly),
    # with the label appended as an extra feature row.
    normal_data_files = [os.path.join("corpus", data_dim, corpus, data_trans, "normal.npy")]
    anomaly_data_file = os.path.join("corpus", data_dim, corpus, data_trans, "anomaly.npy")
    X, ano_X = preprocess(normal_data_files, anomaly_data_file, data_dim)
    labels = np.vstack(
        [np.ones((X.shape[0], 1, 1), dtype=float),
         np.zeros((ano_X.shape[0], 1, 1), dtype=float)])  # normal samples -> 1, anomalies -> 0
    X = np.vstack([X, ano_X])
    X_source = np.hstack([X, labels])

    # Target domain: normal data from the training working conditions.
    target_train_data_files = [os.path.join("corpus", data_dim, corpus, data_trans, work_load, "normal.npy") for
                               work_load in FLAGS.wc_4_train.split(",")]
    target_anomaly_data_file = os.path.join("corpus", data_dim, corpus, data_trans, "anomaly.npy")
    target_train_normal_data, target_anomaly_data = preprocess(target_train_data_files, target_anomaly_data_file,
                                                               data_dim)

    model_name = "transfer_gmm"
    train_history_file = os.path.join("data", "model", data_dim, corpus, data_trans, model_name + "_train_history.png")
    check_file = os.path.join("data", "model", data_dim, corpus, data_trans, model_name + "_check.png")
    model_file = os.path.join("data", "model", data_dim, corpus, data_trans, model_name)

    n_component = 4
    model = TransferGMM(input_shape=(target_train_normal_data.shape[-2], target_train_normal_data.shape[-1]),
                        hidden_dims=[128, 32], estimater_dims=[10, n_component])
    model.train(X_source, target_train_normal_data, batch_size, epochs, learning_rate, chart_file=train_history_file)
    save_model(model, model_file)
    # Round-trip through disk to validate the saved model.
    model = load_model(model_file)

    # Target-domain normal data from the held-out (validation) working conditions.
    target_test_normal_data = preprocess1(
        [os.path.join("corpus", data_dim, corpus, data_trans, wc, "normal.npy") for wc in FLAGS.wc_4_valid.split(",")],
        data_dim)

    target_normal_train_energy = cal_energy(model, target_train_normal_data, n_component)
    target_normal_test_energy = cal_energy(model, target_test_normal_data, n_component)
    target_anomaly_energy = cal_energy(model, target_anomaly_data, n_component)
    print("目标领域的正常样本（参与训练）energy loss")
    sl = sorted(target_normal_train_energy.numpy().tolist())
    _print_energy_percentiles(sl)
    threshold = sl[int(0.8 * len(sl))]  # 80th percentile of normal energies used as decision threshold
    print("threshold", threshold)
    print("目标领域的正常样本（未参与训练）energy loss")
    valid_sl = sorted(target_normal_test_energy.numpy().tolist())
    _print_energy_percentiles(valid_sl)
    print("目标领域的正常样本（未参与训练）召回率{:.2f}%".format(100 * np.count_nonzero(np.asarray(valid_sl) <= threshold) / len(valid_sl)))
    print("目标领域的异常样本（未参与训练）energy loss")
    ano_sl = sorted(target_anomaly_energy.numpy().tolist())
    _print_energy_percentiles(ano_sl)
    print("目标领域的异常样本（未参与训练）召回率{:.2f}%".format(100 * np.count_nonzero(np.asarray(ano_sl) > threshold) / len(ano_sl)))

    # Subsample for plotting only (threshold above was computed on full data).
    sl = _subsample_sorted(sl)
    valid_sl = _subsample_sorted(valid_sl)
    ano_sl = _subsample_sorted(ano_sl)
    plt.figure(figsize=(8, 8))
    ax = plt.subplot()
    plt.suptitle("蓝色:目标领域的正常样本（参与训练）,绿色:目标领域的正常样本（未参与训练）,红色:目标领域的异常样本（未参与训练）")
    ax.axhline(threshold, color='black', linestyle='dashed')
    # Drop the top 1% of each curve so extreme energies don't flatten the plot.
    ax.scatter(range(len(sl) // 100 * 99), sl[:len(sl) // 100 * 99], color='b')  # blue: training-condition normals
    ax.scatter(range(len(valid_sl) // 100 * 99), valid_sl[:len(valid_sl) // 100 * 99], color='g')  # green: unseen-condition normals
    ax.scatter(range(len(ano_sl) // 100 * 99), ano_sl[:len(ano_sl) // 100 * 99], color='r')  # red: anomalies
    plt.savefig(check_file, format="png")
    plt.show()


if __name__ == '__main__':
    # Parse absl flags (corpus, data_dim, data_trans, wc_4_*) and run main().
    app.run(main)
# python .\anomaly_detection\vary_work_condition\transfer_gmm.py --corpus jiangnan --data_dim 1d --data_trans original --wc_4_train 600,1000 --wc_4_valid 800
# python .\anomaly_detection\vary_work_condition\transfer_gmm.py --corpus jiangnan --data_dim 1d --data_trans original --wc_4_train 600,800 --wc_4_valid 1000
# python .\anomaly_detection\vary_work_condition\transfer_gmm.py --corpus jiangnan --data_dim 1d --data_trans original --wc_4_train 800,1000 --wc_4_valid 600
#
# python .\anomaly_detection\vary_work_condition\transfer_gmm.py --corpus paderborn --data_dim 1d --data_trans original --wc_4_train N15_M07_F10,N09_M07_F10 --wc_4_valid N15_M01_F10,N15_M07_F04
