# -*- encoding: utf-8 -*-
'''
@File    :   maml.py
@Time    :   2022/2/12 14:02
@Author  :   ZhangChaoYang
@Desc    :   Model-Agnostic Meta-Learning（MAML元学习）。http://proceedings.mlr.press/v70/finn17a/finn17a.pdf
'''

import os, sys

sys.path.insert(0, os.getcwd())
import tensorflow as tf
from util.view import draw_train_history
from util.view import plot_confusion_matrix
from util.work_flow import read_sample, classes
from sklearn.metrics import confusion_matrix
import numpy as np
from fault_classification.cnn import CNN, FLAGS
from tensorflow.keras.callbacks import History
from collections import defaultdict
import random
from absl import flags
from absl import app

# NOTE: the flag is declared as a string (help text: "how many vibration sample points one sample contains")
# and converted with int() in main(); the other flags (corpus, data_dim, ...) come from fault_classification.cnn.FLAGS.
flags.DEFINE_string('window_size', '1024', '一条样本包含多少个振动样本点')
repeat = 1  # number of gradient-descent steps on the support set to obtain theta' before computing the query-set loss


def task_generator(corpus, work_loads, data_dim, trans, class_names, support_num=5, query_num=15):
    '''
    Infinite generator that samples one N-way task at a time.

    A task covers every class in ``class_names``; each class contributes a support set and a
    query set, i.e. one task holds ``len(class_names) * (support_num + query_num)`` raw samples.
    The support set and the query set are each drawn from a single, independently chosen working
    condition, to simulate cross-working-condition transfer.

    :param corpus: dataset name, matching the directory layout under corpus/multi_class/
    :param work_loads: which working conditions to load; the model transfers across them
    :param data_dim: whether the input data is "1d" or "2d"
    :param trans: which transform was applied to the raw vibration signal (e.g. "original", "fft")
    :param class_names: list of class names (one sub-array file per class and working condition)
    :param support_num: number of samples in the support set
    :param query_num: number of samples in the query set; support_num < query_num mirrors
        practice: few known samples, many unknown ones
    :return: yields tuples (x_support, x_query, y_support, y_query), labels one-hot encoded
    '''
    base_dir = os.path.join("corpus", "multi_class", data_dim, corpus)
    # X[class][working_condition] -> ndarray of raw samples, loaded once up front
    X = defaultdict(dict)
    for cls in class_names:
        for wc in work_loads:
            # renamed from "dir" to avoid shadowing the builtin
            wc_dir = os.path.join(base_dir, trans, wc)
            infile = os.path.join(wc_dir, cls) + ".npy"
            X[cls][wc] = np.load(infile)
            print(f'class {cls} work_load {wc} data count {len(X[cls][wc])}')

    while True:
        x_support_set = []
        x_query_set = []
        y_support_set = []
        y_query_set = []
        # One working condition for the support set and one for the query set, each chosen at
        # random every time (this is exactly what simulates cross-working-condition transfer).
        support_wc = random.choice(work_loads)
        query_wc = random.choice(work_loads)
        for i, cls in enumerate(class_names):
            x = X[cls][support_wc]
            # sample without replacement so a task never repeats a raw sample within a set
            x_support = x[np.random.choice(x.shape[0], support_num, replace=False)]
            label = np.zeros(shape=(len(class_names),))
            label[i] = 1  # one-hot label for class i
            y_support = np.tile(label, reps=(support_num, 1))
            x_support_set.append(x_support)
            y_support_set.append(y_support)

            x = X[cls][query_wc]
            x_query = x[np.random.choice(x.shape[0], query_num, replace=False)]
            y_query = np.tile(label, reps=(query_num, 1))
            x_query_set.append(x_query)
            y_query_set.append(y_query)
        yield (np.vstack(x_support_set), np.vstack(x_query_set), np.vstack(y_support_set), np.vstack(y_query_set))


class MAML(object):
    """Model-Agnostic Meta-Learning (Finn et al., 2017) wrapper around a Keras model.

    Inner loop: adapt a copy of the parameters (theta') on each task's support set with
    learning rate ``lr_alpha``. Outer loop: update the meta-parameters (theta) from the
    mean query-set loss over a batch of tasks with learning rate ``lr_beta``.
    """

    def __init__(self, input_shape, model, loss_fn, lr_alpha, lr_beta):
        # model: a Keras model (built here so get_weights/set_weights work immediately)
        # loss_fn: callable(y_true, y_pred) -> per-sample loss tensor
        # lr_alpha: inner-loop (task adaptation) learning rate
        # lr_beta: outer-loop (meta-update) learning rate
        self.model = model
        self.model.build(input_shape)
        self.input_shape = input_shape
        self.loss_fn = loss_fn
        self.lr_alpha = lr_alpha
        self.lr_beta = lr_beta

    def forward(self, x, weights):
        # Run the model forward with an explicit weight list: overwrite the model's
        # variables with `weights`, then call it on `x`.
        # NOTE(review): set_weights assigns raw values; it does not record the assignment
        # on any active GradientTape, so gradients w.r.t. the *source* of `weights`
        # (e.g. an updated theta') are likely cut here — confirm against the TF version used.
        self.model.set_weights(weights)
        return self.model(x)

    def build(self, batch):
        """One meta-training step over a batch of tasks; returns the scalar mean query loss."""
        with tf.GradientTape() as batch_tape:
            theta = self.model.get_weights()  # theta: the original (meta) model parameters
            query_loss_list = []
            for task in batch:
                x_support_set, x_query_set, y_support_set, y_query_set = task
                theta_dash = theta.copy()  # shallow copy; elements are replaced, not mutated, below

                # Inner loop: adapt theta' on the support set (`repeat` gradient steps)
                for i in range(repeat):
                    with tf.GradientTape() as tape:
                        pred_support = self.forward(x_support_set, theta_dash)  # forward pass
                        loss_support = self.loss_fn(y_support_set, pred_support)  # support loss
                        # gradient w.r.t. the model's variables (which currently hold theta')
                        grads = tape.gradient(loss_support, self.model.trainable_variables)
                        theta_dash = [t - self.lr_alpha * g for (t, g) in zip(theta_dash, grads)]  # SGD step

                # Evaluate the adapted parameters theta' on the query set
                pred_query = self.forward(x_query_set, theta_dash)
                loss_query = self.loss_fn(y_query_set, pred_query)
                query_loss_list.append(loss_query)

            # Mean (not sum, despite the original comment) of the query losses across the task batch
            batch_loss = tf.reduce_mean(query_loss_list, axis=0)
            # Outer loop: differentiate batch_loss w.r.t. the meta-parameters theta.
            # NOTE(review): because forward() used set_weights, the tape may only see the
            # last assignment — this looks like (at most) a first-order MAML approximation;
            # verify the gradient actually flows as intended.
            grads = batch_tape.gradient(batch_loss, self.model.trainable_variables)
            theta = [t - self.lr_beta * g for (t, g) in zip(theta, grads)]  # meta update
            self.model.set_weights(theta)
        return tf.reduce_mean(batch_loss)

    def pre_train(self, batch_size, steps, corpus, work_loads, data_dim, trans, class_names):
        """Meta-train for `steps` tasks (grouped into batches of `batch_size`), then save
        the weights and a loss-history chart; returns the saved weight file path."""
        step = 0
        batch = []
        losses = []  # sampled every 100 steps, for the training-history chart
        for task in task_generator(corpus, work_loads, data_dim, trans, class_names):
            step += 1
            x_support_set, x_query_set, y_support_set, y_query_set = task
            batch.append((x_support_set, x_query_set, y_support_set, y_query_set))
            if len(batch) == batch_size:
                batch_loss = self.build(batch)
                if step % 100 == 0:
                    print(f'step {step} loss {batch_loss}')
                    losses.append(batch_loss)
                batch.clear()
            if step >= steps:
                break

        model_dir = os.path.join("data", "model", data_dim, corpus, trans, "maml",
                                 "multi_class" if len(class_names) > 2 else "binary_class")
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)

        model_file_name = "-".join(work_loads)
        train_file = os.path.join(model_dir, model_file_name + "_train_history.png")
        # Wrap the sampled losses in a Keras History object so draw_train_history can plot them
        history = History()
        history.history = {"loss": losses}
        draw_train_history(history, chart_file=train_file, begin_epoch=0)
        model_file = os.path.join(model_dir, model_file_name, model_file_name)
        self.model.save_weights(model_file)
        print(f'save model to {model_file}')
        return model_file

    def fine_tune(self, generator, model_file):
        '''
        Fine-tune on a single task drawn from `generator`: reload the saved meta-weights,
        run repeat*5 support-set gradient steps, and return the resulting mean query loss.
        :param generator: task generator (as returned by task_generator)
        :param model_file: weight file to load the meta-parameters from
        :return: scalar mean loss on the task's query set after adaptation
        '''
        self.model.build(self.input_shape)
        self.model.load_weights(model_file)

        task = next(generator)
        x_support_set, x_query_set, y_support_set, y_query_set = task
        for i in range(repeat * 5):
            theta = self.model.get_weights()  # current parameters
            with tf.GradientTape() as tape:
                pred_support = self.forward(x_support_set, theta)  # forward pass
                loss_support = self.loss_fn(y_support_set, pred_support)  # support loss
                grads = tape.gradient(loss_support, self.model.trainable_variables)  # gradients
                theta = [t - self.lr_alpha * g for (t, g) in zip(theta, grads)]  # SGD step

            self.model.set_weights(theta)
        # Evaluate the adapted parameters on the query set
        pred_query = self.forward(x_query_set, theta)
        loss_query = self.loss_fn(y_query_set, pred_query)
        return tf.reduce_mean(loss_query)


def verify(model, input_shape, corpus, work_loads, data_dim, trans, class_names, model_file):
    """Evaluate `model` (weights loaded from `model_file`) on the test working conditions
    and save a normalized confusion-matrix chart next to the weight file.

    :param model: Keras model to evaluate (built with `input_shape`, weights loaded here)
    :param input_shape: shape passed to model.build()
    :param corpus: dataset name, matching the corpus directory layout
    :param work_loads: working conditions whose samples form the test set
    :param data_dim: "1d" or "2d" input layout
    :param trans: transform applied to the raw signal (directory component)
    :param class_names: class-name list; >2 names means multi-class, else binary
    :param model_file: weight file path; the chart is written to model_file + "_verify.png"
    """
    # Collect samples from every test working condition, then stack once
    x_parts, y_parts = [], []
    for wc in work_loads:
        xarray, yarray = read_sample(os.path.join("corpus", "multi_class", data_dim, corpus, trans, wc),
                                     len(class_names) > 2)
        x_parts.append(xarray)
        y_parts.append(yarray)
    x_test = np.vstack(x_parts)
    y_test = np.vstack(y_parts)

    model.build(input_shape)
    model.load_weights(model_file)
    y_pred = model(x_test)
    verify_file = model_file + "_verify.png"

    if len(class_names) > 2:
        # one-hot labels / score vectors -> class indices (argmax == first maximum,
        # same tie-breaking as the original np.where-based lookup)
        y = np.argmax(y_test, axis=1)
        y_pred = np.argmax(y_pred, axis=1)
        cnf_matrix = confusion_matrix(y, y_pred)
        plot_confusion_matrix(cnf_matrix, classes, normalize=True, chart_file=verify_file)
    else:
        # binary case: threshold the single score at 0.5
        y_pred = np.where(y_pred > 0.5, 1, 0)
        cnf_matrix = confusion_matrix(y_test, y_pred)
        # fixed label typo: "anomly" -> "anomaly"
        plot_confusion_matrix(cnf_matrix, ["anomaly", "normal"], normalize=True, chart_file=verify_file)


def train(corpus, train_wc, test_wc, data_dim="1d", trans="original", window_size=1024,
          class_names=('normal', 'ball', 'inner', 'outer')):
    """Meta-train a CNN with MAML on `train_wc` working conditions, fine-tune/evaluate on
    `test_wc`, and plot a confusion matrix for the final model.

    :param corpus: dataset name
    :param train_wc: working conditions used for meta-training
    :param test_wc: working conditions used for fine-tuning and verification
    :param data_dim: "1d" or "2d" input layout
    :param trans: transform applied to the raw signal ("original", "fft", ...)
    :param window_size: vibration points per sample
    :param class_names: fault classes (tuple default — avoids the mutable-default pitfall)
    """
    batch_size = 10
    steps = 10000  # total number of training steps; each step consumes one task
    seq_len = window_size
    if trans == "fft":
        seq_len = window_size // 2  # FFT keeps only half of the spectrum
    input_shape = (1, seq_len, 1)
    base_model = CNN(kernel_sizes=[(64, 1), (3, 1), (3, 1), (3, 1)], filters=[10, 8, 6, 4], pool_size=(2, 1),
                     class_num=len(class_names), feature_dim=128)
    model = MAML(input_shape=input_shape, model=base_model, loss_fn=tf.nn.softmax_cross_entropy_with_logits,
                 lr_alpha=5e-3, lr_beta=1e-2)
    model_file = model.pre_train(batch_size, steps, corpus, train_wc, data_dim, trans, class_names)

    # Fine-tune several times on fresh tasks to get a mean query loss
    query_losses = []
    generator = task_generator(corpus, test_wc, data_dim, trans, class_names)
    for i in range(10):
        query_loss = model.fine_tune(generator, model_file)
        print(f'query loss {query_loss}')
        query_losses.append(query_loss)
    print(f'mean query loss {np.mean(query_losses)}')
    # NOTE(review): this overwrites the meta-trained checkpoint with the weights from the
    # last fine_tune call (fine_tune mutates base_model in place) — confirm this is intended
    # before verify() loads the file below.
    base_model.save_weights(model_file)

    verify(base_model, input_shape, corpus, test_wc, data_dim, trans, class_names, model_file)


def main(argv):
    """absl entry point: read the command-line flags and launch training.

    :param argv: positional args left over after flag parsing (unused)
    """
    corpus = FLAGS.corpus
    data_dim = FLAGS.data_dim
    data_trans = FLAGS.data_trans
    # NOTE(review): FLAGS.multi_class is defined and accepted on the command line but was
    # never used here (train() always runs with its default 4-class class_names) — the dead
    # local read has been removed; wire the flag through to train() if binary mode is needed.
    window_size = int(FLAGS.window_size)  # declared as a string flag; convert once here
    train(corpus, train_wc=FLAGS.wc_4_train.split(","), test_wc=FLAGS.wc_4_valid.split(","), data_dim=data_dim,
          trans=data_trans,
          window_size=window_size)


if __name__ == '__main__':
    app.run(main)  # absl parses the flags, then invokes main(argv)

# python .\fault_classification\vary_work_condition\maml.py --corpus jiangnan --data_dim 1d --data_trans original --multi_class  --wc_4_train 600,1000  --wc_4_valid 800
