# -*- encoding: utf-8 -*-
'''
@File    :   mae.py
@Time    :   2021/12/1 11:45
@Author  :   ZhangChaoYang
@Desc    :   Masked Autoencoders https://arxiv.org/abs/2111.06377
'''

import os
import sys

import matplotlib.pyplot as plt

sys.path.insert(0, os.getcwd())
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Dense, Embedding, BatchNormalization
from models.transformer import Transformer
from models.patch import Patches
import numpy as np
from models import metrics
from models import losses
from util.view import draw_train_history, draw_fit_error
from util.err_analyze import fit_err_percentage
from util.work_flow import preprocess, gen_outfile, save_model, load_model
from absl import flags
from absl import app

FLAGS = flags.FLAGS

# Command-line flags (help strings kept verbatim).
# kind: which network the AE uses — "fc" or "transformer" (per the author, transformer results are poor).
flags.DEFINE_string('kind', "fc", 'AE模型使用哪种网络，可选fc或transformer，transformer的结果很糟糕')
# corpus: which dataset — cwru, jiangnan, or paderborn.
flags.DEFINE_string('corpus', "jiangnan", '数据集，可选cwru、jiangnan、paderborn')
# data_dim: input dimensionality — "1d" or "2d".
flags.DEFINE_string('data_dim', "1d", '数据集，可选1d、2d')
# data_trans: transform applied to the raw signal; valid options depend on data_dim
# (1d: original/fft/stat, 2d: sfft/cwt).
flags.DEFINE_string('data_trans', "original",
                    '数据集，data_dim=1d时data_trans可选original、fft、stat，data_dim=2d时data_trans可选sfft、cwt')
# mask_ratio: fraction of input patches that get masked out.
flags.DEFINE_float('mask_ratio', 0.75, '输入数据掩掉的比例')


class MAEEncoder(Model):
    """Masked-autoencoder encoder (MAE, https://arxiv.org/abs/2111.06377).

    Splits the input image into patches, randomly masks ``mask_ratio`` of
    them, projects the unmasked patches (plus position embeddings), and runs
    them through a transform network.  When ``down_stream`` is False, the
    output re-inserts learned mask tokens at the masked positions and
    restores the original patch order so a decoder can reconstruct the image.
    """

    def __init__(self, mask_ratio, img_shape, patch_size, projection_dim, kind, hidden_dims, layer_num, head_size,
                 down_stream=False):
        """
        Args:
            mask_ratio: fraction of patches to mask out (e.g. 0.75).
            img_shape: (height, width) of the input image.
            patch_size: (patch_height, patch_width).
            projection_dim: embedding dimension for each projected patch.
            kind: "transformer" or "fc" — which transform network to build.
            hidden_dims: hidden layer sizes for the "fc" variant.
            layer_num: number of stacked Transformer layers ("transformer" variant).
            head_size: attention head size ("transformer" variant).
            down_stream: if True, ``call`` returns only the transformed
                unmasked embeddings (no mask-token re-insertion / unshuffle).
        """
        super(MAEEncoder, self).__init__()
        self.mask_ratio = mask_ratio
        self.down_stream = down_stream
        patch_height, patch_width = patch_size
        patch_area = patch_height * patch_width
        img_height, img_width = img_shape
        patch_rows = img_height // patch_height  # if img_size is not divisible by patch_size, trailing rows/columns are dropped
        patch_cols = img_width // patch_width

        self.num_patches = patch_rows * patch_cols
        self.projection = Dense(units=projection_dim)
        self.patch = Patches(patch_size=(patch_height, patch_width))
        self.position_embed_layer = Embedding(input_dim=self.num_patches, output_dim=projection_dim)
        transform_layers = []  # Transformer preserves the input dimension, so several layers can be stacked
        if kind == "transformer":
            for i in range(layer_num):
                transform_layers.append(Transformer(embedding_dim=projection_dim, hidden_dim=projection_dim * 2,
                                                    head_size=head_size))
        elif kind == "fc":
            for hidden_dim in hidden_dims[:-1]:
                transform_layers.append(BatchNormalization())
                transform_layers.append(Dense(units=hidden_dim, activation=tf.nn.relu))
            transform_layers.append(Dense(units=hidden_dims[-1]))
            for hidden_dim in hidden_dims[-2::-1]:
                transform_layers.append(BatchNormalization())
                transform_layers.append(Dense(units=hidden_dim, activation=tf.nn.relu))
            transform_layers.append(BatchNormalization())
            # No activation on the last decode layer: the output range is left unconstrained
            transform_layers.append(Dense(units=projection_dim))
        else:
            raise Exception("unsupported MAE type: {}".format(kind))
        self.transform = Sequential(transform_layers)
        # Learned token substituted for every masked patch (one token shared by all positions)
        self.mask_token = tf.Variable(tf.random.normal([1, patch_area]), trainable=True)

    def call(self, inputs, *args, **kwargs):
        if len(inputs.shape) == 3:
            inputs = tf.expand_dims(inputs, axis=-1)

        # Patches expects a 4-D tensor (batch_size, height, width, channel);
        # the result `patches` has shape (batch_size, num_patches, patch_area)
        patches = self.patch(inputs)

        batch_size = tf.shape(patches)[0]  # tf.shape(x) is the dynamic shape, x.shape the static one; batch_size is clearly dynamic
        # if self.down_stream:  # no masking in the downstream task
        #     positions = tf.range(start=0, limit=batch_size, delta=1)
        #     pos_embeddings = self.position_embed_layer(positions[tf.newaxis, ...])  # (1,num_patches,projection_dim)
        #     patch_embeddings = self.projection(patches) + pos_embeddings  # linear projection of the patch plus position info
        #     trans_out = self.transform(patch_embeddings)
        #     return trans_out
        # else:
        # NOTE(review): numpy randomness is re-drawn per call but would be frozen
        # if this method were traced by tf.function — presumably runs eagerly; confirm.
        permutation = np.random.permutation(self.num_patches)
        mask_tokens, unmask_patches, mask_positions, unmask_positions = self.shuffle(patches, batch_size,
                                                                                     permutation)
        pos_embeddings = self.position_embed_layer(
            unmask_positions[tf.newaxis, ...])  # (1,num_patches,projection_dim)
        pos_embeddings = tf.tile(pos_embeddings, [batch_size, 1,
                                                  1])  # (batch_size,num_patches,projection_dim); tile replicates along each axis, [batch_size, 1, 1] copies batch_size times along axis 0
        mask_pos_embeddings = self.position_embed_layer(mask_positions[tf.newaxis, ...])
        mask_pos_embeddings = tf.tile(mask_pos_embeddings, [batch_size, 1, 1])
        unmask_patch_embeddings = self.projection(unmask_patches) + pos_embeddings  # linear projection of each patch, plus its position embedding
        mask_patch_embeddings = self.projection(mask_tokens) + mask_pos_embeddings
        trans_out = self.transform(unmask_patch_embeddings)
        if self.down_stream:  # the downstream task also masks; it just skips restoring the original layout
            return trans_out

        patches_hat = tf.concat([trans_out, mask_patch_embeddings], axis=1)
        patches_hat = self.unshuffle(patches_hat, permutation)
        return patches_hat

    def shuffle(self, patches, batch_size, permutation):
        """Split patches into kept (unmasked) and masked parts.

        The first ``num_patches - mask_num`` indices of ``permutation`` select
        the patches that are kept; the remaining indices are the masked
        positions, for which copies of the learned mask token are produced.

        Returns:
            (mask_tokens, unmask_patches, mask_positions, unmask_positions)
        """
        mask_num = int(self.mask_ratio * self.num_patches)
        unmask_patch_list = []
        unmask_positions = []
        mask_positions = []
        for i in range(self.num_patches - mask_num):
            unmask_patch_list.append(patches[:, permutation[i]:permutation[i] + 1, :])
            unmask_positions.append(permutation[i])
        for i in range(self.num_patches - mask_num, self.num_patches):
            mask_positions.append(permutation[i])
        unmask_patches = tf.concat(unmask_patch_list, axis=1)
        mask_tokens = tf.repeat(self.mask_token, repeats=mask_num, axis=0)
        mask_tokens = tf.repeat(mask_tokens[tf.newaxis, ...], repeats=batch_size, axis=0)
        return mask_tokens, unmask_patches, tf.convert_to_tensor(mask_positions), tf.convert_to_tensor(unmask_positions)

    def unshuffle(self, patches, permutation):
        """Restore the original patch order (inverse of the permutation applied in shuffle)."""
        patch_list = []
        rand_indices = np.argsort(permutation)
        for i in rand_indices:
            patch_list.append(patches[:, i:i + 1, :])
        return tf.concat(patch_list, axis=1)


class MAEDecoder(Model):
    """Maps patch embeddings back to pixel space and reassembles the image.

    Mirrors MAEEncoder's transform construction: a stack of Transformer
    layers or a symmetric fully-connected network, followed by a linear
    layer that restores each patch from projection_dim to patch_area.
    """

    def __init__(self, img_shape, patch_size, projection_dim, kind, hidden_dims, layer_num, head_size):
        super(MAEDecoder, self).__init__()
        patch_height, patch_width = patch_size
        _, self.img_width = img_shape
        patch_area = patch_height * patch_width
        # Transformer keeps the embedding dimension, so layers can be stacked freely.
        seq_layers = []
        if kind == "transformer":
            seq_layers = [Transformer(embedding_dim=projection_dim, hidden_dim=projection_dim * 2,
                                      head_size=head_size)
                          for _ in range(layer_num)]
        elif kind == "fc":
            # Bottleneck shape: shrink through hidden_dims, then widen back out.
            for units in hidden_dims[:-1]:
                seq_layers += [BatchNormalization(), Dense(units=units, activation=tf.nn.relu)]
            seq_layers.append(Dense(units=hidden_dims[-1]))
            for units in hidden_dims[-2::-1]:
                seq_layers += [BatchNormalization(), Dense(units=units, activation=tf.nn.relu)]
            seq_layers.append(BatchNormalization())
            # Last layer has no activation: the output range stays unconstrained.
            seq_layers.append(Dense(units=projection_dim))
        else:
            raise Exception("unsupported MAE type: {}".format(kind))
        self.transform = Sequential(seq_layers)
        self.patch = Patches(patch_size=(patch_height, patch_width))
        # Per-patch linear map from projection_dim back to patch_area.
        self.restore_layer = Dense(units=patch_area)

    def call(self, inputs, training=None, mask=None):
        """Transform the embeddings, restore per-patch pixels, and rebuild the image."""
        restored_patches = self.restore_layer(self.transform(inputs))
        return self.patch.reconstruct_from_patch(restored_patches, self.img_width)


class MAE(Model):
    """Full masked autoencoder: MAEEncoder followed by MAEDecoder.

    Trained self-supervised to reconstruct its own input.
    """

    def __init__(self, mask_ratio, img_shape, patch_size, projection_dim, kind, hidden_dims, layer_num, head_size):
        super(MAE, self).__init__()
        self.encoder = MAEEncoder(mask_ratio, img_shape, patch_size, projection_dim, kind, hidden_dims, layer_num,
                                  head_size)
        self.decoder = MAEDecoder(img_shape, patch_size, projection_dim, kind, hidden_dims, layer_num, head_size)

    def call(self, inputs, training=None, mask=None):
        """Encode (with masking) and decode back to the input shape."""
        return self.decoder(self.encoder(inputs))

    def train(self, X, batch_size, epochs, learning_rate, chart_file=""):
        """Compile with AdamW and fit the model to reconstruct X, then plot the history.

        Uses a 0.2 validation split; square loss plus MAE / relative-error metrics.
        """
        adamw = tfa.optimizers.AdamW(learning_rate=learning_rate, weight_decay=1e-4)
        self.compile(adamw, loss=losses.square_loss, metrics=['mae', metrics.relative_error])
        history = self.fit(X, X, batch_size=batch_size, epochs=epochs, callbacks=[], validation_split=0.2)
        draw_train_history(history, chart_file=chart_file)


def main(argv):
    """Train an MAE on the configured corpus, then compare reconstruction
    error on normal vs. anomalous samples."""
    model_name = "mae"
    learning_rate = 1e-4
    batch_size = 256
    epochs = 100

    normal_data_files, anomaly_data_file, train_history_file, check_file, model_file, scaler_file = gen_outfile(
        FLAGS.data_dim,
        FLAGS.corpus,
        FLAGS.data_trans,
        model_name,
    )
    X, ano_X = preprocess(normal_data_files, anomaly_data_file, FLAGS.data_dim)

    mae = MAE(mask_ratio=FLAGS.mask_ratio,
              img_shape=(X.shape[1], X.shape[2]),
              patch_size=(X.shape[1] // 128, 1),
              projection_dim=16,
              kind=FLAGS.kind,
              hidden_dims=[16, 8],
              layer_num=1,
              head_size=8)
    mae.train(X, batch_size=batch_size, epochs=epochs, learning_rate=learning_rate, chart_file=train_history_file)
    save_model(mae, model_file)
    # Reload immediately to verify the saved model round-trips before evaluation.
    mae = load_model(model_file)

    print("正常样本")
    e1 = fit_err_percentage(X, mae.predict(X, batch_size=10000), losses.square_loss)
    print("故障样本")
    # Per the author, ~70% of anomalous samples are detected on the jiangnan corpus.
    e2 = fit_err_percentage(ano_X, mae.predict(ano_X, batch_size=10000), losses.square_loss)
    draw_fit_error(e1, e2, model_file)
    plt.show()


if __name__ == "__main__":
    # Parse absl flags from the command line and dispatch to main().
    app.run(main)

# python .\anomaly_detection\mae.py  --kind fc --corpus cwru --data_dim 1d --data_trans original --mask_ratio 0.75
# python .\anomaly_detection\mae.py  --kind fc --corpus jiangnan --data_dim 1d --data_trans original --mask_ratio 0.75
