# -*- encoding: utf-8 -*-
'''
@File    :   cnn.py
@Time    :   2022/2/11 16:16
@Author  :   ZhangChaoYang
@Desc    :   基于多层卷积的分类器
'''
import os, sys

sys.path.insert(0, os.getcwd())
import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Conv2D, ReLU, MaxPooling2D, Dense, Flatten, Dropout, Softmax
from tensorflow.keras.optimizers import Adam
from util.view import draw_train_history
from tensorflow.keras.models import load_model
from util.view import plot_confusion_matrix
from util.work_flow import read_sample, classes
from sklearn.metrics import confusion_matrix
import numpy as np
from sklearn.model_selection import train_test_split
from absl import flags
from absl import app

FLAGS = flags.FLAGS

# Command-line configuration: which corpus to use, how the raw signals were
# transformed, and which working conditions feed training vs. validation.
# (Help strings are user-facing runtime text and are left untranslated.)
flags.DEFINE_string('corpus', "jiangnan", '数据集，可选jiangnan、paderborn')
# "1d": signal-like samples (original/fft/stat); "2d": image-like samples (sfft/cwt).
flags.DEFINE_string('data_dim', "1d", '数据集，可选1d、2d')
flags.DEFINE_string('data_trans', "original",
                    '数据集，data_dim=1d时data_trans可选original、fft、stat，data_dim=2d时data_trans可选sfft、cwt')
flags.DEFINE_boolean('multi_class', False, '是否要多分类，即是否对故障进行具体的分类，二分类仅区分出正常样本和故障样本') # merely passing --multi_class on the command line sets this to True
# Comma-separated lists of working-condition directory names, e.g. "600,800,1000".
flags.DEFINE_string('wc_4_train', "", '用于训练模型的工况列表（用逗号分隔）')
flags.DEFINE_string('wc_4_valid', "", '用于测试模型的工况列表（用逗号分隔）')


class CNN(Model):
    """Multi-layer convolutional classifier.

    Stacks ``len(kernel_sizes)`` Conv2D+ReLU+MaxPool groups, then a
    Flatten -> Dense(feature_dim) -> ReLU -> Dropout head.  For
    ``class_num > 2`` the output is a ``class_num``-way softmax
    distribution; otherwise a single raw logit (no sigmoid) for binary
    classification.
    """

    def __init__(self, kernel_sizes, filters, pool_size, class_num, feature_dim):
        """
        :param kernel_sizes: per-layer conv kernel sizes, e.g. [(64, 1), (3, 1)]
        :param filters: per-layer filter counts; must align with kernel_sizes
        :param pool_size: pooling window (also used as pooling stride)
        :param class_num: number of classes; <= 2 builds a binary (1-logit) head
        :param feature_dim: width of the fully-connected feature layer
        :raises ValueError: if filters and kernel_sizes differ in length
        """
        if len(filters) != len(kernel_sizes):
            # raise instead of assert: asserts are stripped under `python -O`
            raise ValueError("filters and kernel_sizes must have the same length")
        super(CNN, self).__init__()
        self.class_num = class_num
        layers = []
        for i, kernel_size in enumerate(kernel_sizes):
            layers.append(Conv2D(filters[i], kernel_size, padding='same', name="conv{}".format(i)))
            layers.append(ReLU(name="active{}".format(i)))
            layers.append(MaxPooling2D(padding='same', strides=pool_size, pool_size=pool_size, name="pool{}".format(i)))
        layers.append(Flatten(name="flatten"))
        layers.append(Dense(feature_dim, name="fc0"))
        layers.append(ReLU(name="active{}".format(len(kernel_sizes))))
        layers.append(Dropout(0.5, name="dropout"))
        if class_num > 2:
            layers.append(Dense(class_num, name="fc2"))
            layers.append(Softmax(name="class_softmax"))
            # BUG FIX: the head already applies Softmax, so the loss must
            # consume probabilities.  The original used
            # tf.nn.softmax_cross_entropy_with_logits here, which re-applies
            # softmax to the already-normalized output (double softmax).
            self.loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
        else:
            layers.append(Dense(1, name="fc2"))
            # Binary head emits one raw logit; BinaryCrossentropy(from_logits=True)
            # is numerically equivalent to the original
            # tf.nn.sigmoid_cross_entropy_with_logits.
            self.loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
        self.model = Sequential(layers)

    def call(self, inputs, training=None, mask=None):
        """Forward pass; adds a trailing channel axis expected by Conv2D."""
        x = tf.expand_dims(inputs, axis=-1)  # expand the channel dimension
        logits = self.model(x)
        return logits

    def train(self, x_train, y_train, x_test, y_test, learning_rate, epochs, chart_file=""):
        """Compile with Adam + the head-matched loss, fit, and plot the history.

        :param chart_file: optional path for the training-history chart
        """
        optim = Adam(learning_rate=learning_rate)
        self.compile(optim, loss=self.loss_fn)
        history = self.fit(x=x_train, y=y_train, validation_data=(x_test, y_test), epochs=epochs, shuffle=True)
        draw_train_history(history, chart_file=chart_file, begin_epoch=0)
        draw_train_history(history, chart_file=chart_file, begin_epoch=0)


def forward(model, x, weights):
    """Install *weights* into *model*, then return the forward pass on *x*.

    Used by the meta-learning inner loop so a forward computation can be
    run under an explicit parameter vector.
    """
    model.set_weights(weights)
    prediction = model(x)
    return prediction


def fine_tune(model, generator, loss_fn, lr_alpha=1e-2, steps=5):
    """Adapt *model* to one task via plain gradient-descent inner steps.

    Draws a single (support, query) task from *generator* and performs
    *steps* SGD updates on the support set, writing the updated parameters
    back into the model after each step.

    :param generator: iterator yielding
        (x_support_set, x_query_set, y_support_set, y_query_set) tuples
    :param loss_fn: callable ``loss_fn(y_true, y_pred)``
    :param lr_alpha: inner-loop learning rate
    :param steps: number of inner gradient steps (was hard-coded to 5)
    """
    task = next(generator)
    x_support_set, x_query_set, y_support_set, y_query_set = task
    for _ in range(steps):
        theta = model.get_weights()  # snapshot of the current parameters
        with tf.GradientTape() as tape:
            pred_support = forward(model, x_support_set, theta)  # forward pass
            loss_support = loss_fn(y_support_set, pred_support)  # support loss
        # Gradient computation and the update are hoisted OUT of the tape
        # context: the original ran them inside `with`, needlessly recording
        # the gradient ops on the tape.
        grads = tape.gradient(loss_support, model.trainable_variables)
        # NOTE(review): assumes every array from get_weights() corresponds 1:1
        # to a trainable variable; zip() truncates silently if the model has
        # non-trainable weights — confirm for models with e.g. BatchNorm.
        theta = [t - lr_alpha * g for (t, g) in zip(theta, grads)]  # SGD step
        model.set_weights(theta)


def _load_wc_samples(sample_root, wc_list, multi_class):
    """Load and stack samples for every non-empty working condition in *wc_list*.

    Returns ``(None, None)`` when the list contains no usable entries
    (e.g. the flag was empty and split to ``['']``).
    """
    xs, ys = [], []
    for wc in wc_list:
        if len(wc.strip()) == 0:  # '' entries come from "".split(",")
            continue
        xarray, yarray = read_sample(os.path.join(sample_root, wc), multi_class)
        xs.append(xarray)
        ys.append(yarray)
    if not xs:
        return None, None
    if len(xs) == 1:
        # Preserve the single-condition shape exactly (no extra vstack axis).
        return xs[0], ys[0]
    # Stack once instead of re-vstacking inside the loop (avoids O(n^2) copying).
    return np.vstack(xs), np.vstack(ys)


def diagnose(corpus, train_wc, test_wc, data_dim, trans, multi_class=True, epochs=100):
    """Train a CNN fault classifier and plot its confusion matrix.

    :param corpus: dataset name ("jiangnan" or "paderborn")
    :param train_wc: list of working-condition names used for training
    :param test_wc: list of working-condition names used for validation;
        when empty, a random split of the training data is used instead
    :param data_dim: "1d" or "2d" sample layout
    :param trans: signal transform name (original/fft/stat/sfft/cwt)
    :param multi_class: classify concrete fault types vs. normal/fault only
    :param epochs: number of training epochs
    :raises ValueError: if no training samples could be loaded
    """
    print(f'train_wc {train_wc} test_wc {test_wc}')
    class_num = 4
    learning_rate = 1e-3
    valid_ratio = 0.2
    model_name = "cnn"

    model_dir = os.path.join("data", "model", data_dim, corpus, trans, model_name,
                             "multi_class" if multi_class else "binary_class")
    # exist_ok avoids the check-then-create race of the original exists()/makedirs pair
    os.makedirs(model_dir, exist_ok=True)

    # Larger filters or deeper conv stacks both help accuracy.
    model = CNN(kernel_sizes=[(64, 1), (3, 1), (3, 1), (3, 1)], filters=[10, 8, 6, 4], pool_size=(2, 1),
                class_num=class_num if multi_class else 2, feature_dim=128)

    sample_root = os.path.join("corpus", "multi_class", data_dim, corpus, trans)
    x_train, y_train = _load_wc_samples(sample_root, train_wc, multi_class)
    x_test, y_test = _load_wc_samples(sample_root, test_wc, multi_class)
    if x_train is None:
        raise ValueError("no training samples loaded; check --wc_4_train")

    model_file_name = "-".join(train_wc)
    if x_test is None:  # test_wc is empty: hold out a random validation split
        model_file_name = "all"
        x_train, y_train = x_train[:100000], y_train[:100000]  # cap sample count
        x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=valid_ratio, random_state=42)

    print('train sample', x_train.shape[0])
    print('valid sample', x_test.shape[0])
    train_file = os.path.join(model_dir, model_file_name + "_train_history.png")
    model.train(x_train, y_train, x_test, y_test, learning_rate, epochs, train_file)
    model_file = os.path.join(model_dir, model_file_name)
    model.save(model_file)

    # Reload from disk to verify exactly what was persisted.
    model = load_model(model_file, compile=False)

    y_pred = model(x_test)
    verify_file = os.path.join(model_dir, model_file_name + "_verify.png")

    if multi_class:
        # One-hot labels / per-class scores -> class indices.
        y = np.argmax(y_test, axis=1)
        y_pred = np.argmax(y_pred, axis=1)
        cnf_matrix = confusion_matrix(y, y_pred)
        plot_confusion_matrix(cnf_matrix, classes, normalize=True, chart_file=verify_file)
    else:
        # BUG FIX: the binary head outputs a raw logit (Dense(1), no sigmoid),
        # so the decision boundary is logit > 0 — the original thresholded the
        # logit at 0.5, biasing predictions toward class 0.
        y_pred = np.where(np.asarray(y_pred) > 0, 1, 0)
        cnf_matrix = confusion_matrix(y_test, y_pred)
        plot_confusion_matrix(cnf_matrix, ["anomaly", "normal"], normalize=True, chart_file=verify_file)


def main(argv):
    """absl entry point: read the parsed flags and launch one diagnosis run."""
    wc_train = FLAGS.wc_4_train.split(",")
    wc_valid = FLAGS.wc_4_valid.split(",")
    diagnose(FLAGS.corpus,
             train_wc=wc_train,
             test_wc=wc_valid,
             multi_class=FLAGS.multi_class,
             data_dim=FLAGS.data_dim,
             trans=FLAGS.data_trans)


if __name__ == '__main__':
    # absl parses the command-line flags, then invokes main(argv).
    app.run(main)

# python fault_classification/cnn.py --corpus jiangnan --data_dim 1d --data_trans original  --wc_4_train 600,800,1000
# python fault_classification/cnn.py --corpus jiangnan --data_dim 1d --data_trans original --multi_class  --wc_4_train 600,800,1000
# python fault_classification/cnn.py --corpus jiangnan --data_dim 1d --data_trans original --multi_class  --wc_4_train 600,1000  --wc_4_valid 800
