# -*- coding: utf-8 -*-
"""
@author: Guansong Pang
The algorithm was implemented using

Python 3.6.6,
Keras 2.2.2 and
TensorFlow 1.10.1.

More details can be found in our KDD19 paper.
Guansong Pang, Chunhua Shen, and Anton van den Hengel. 2019. 
Deep Anomaly Detection with Deviation Networks. 
In The 25th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD ’19),
August 4–8, 2019, Anchorage, AK, USA. ACM, New York, NY, USA, 10 pages. https://doi.org/10.1145/3292500.3330871
"""

import numpy as np
np.random.seed(42)
import tensorflow as tf

tf.set_random_seed(42)
sess = tf.Session()

from keras import regularizers
from keras import backend as K
from keras.models import Model, load_model
from keras.layers import Input, Dense
from keras.optimizers import RMSprop
from keras.callbacks import ModelCheckpoint, TensorBoard

import argparse
import numpy as np
import matplotlib.pyplot as plt
import sys
from scipy.sparse import vstack, csc_matrix
from utils import dataLoading, aucPerformance, writeResults, get_data_from_svmlight_file
from sklearn.model_selection import train_test_split

import time

MAX_INT = np.iinfo(np.int32).max
data_format = 0

def dev_network_d(input_shape):
    '''
    Build the deeper scoring network: three ReLU hidden layers
    (1000 -> 250 -> 20 units), each with L2 weight regularization,
    followed by a linear one-unit anomaly-score output.
    '''
    net_input = Input(shape=input_shape)

    hidden = net_input
    # (units, layer name) for each L2-regularized ReLU hidden layer.
    for units, layer_name in ((1000, 'hl1'), (250, 'hl2'), (20, 'hl3')):
        hidden = Dense(units, activation='relu',
                       kernel_regularizer=regularizers.l2(0.01),
                       name=layer_name)(hidden)
    score = Dense(1, activation='linear', name='score')(hidden)
    return Model(net_input, score)

def dev_network_s(input_shape):
    '''
    Build the shallow scoring network: a single 20-unit ReLU hidden layer
    with L2 weight regularization, followed by a linear one-unit
    anomaly-score output.
    '''
    net_input = Input(shape=input_shape)
    hidden = Dense(20, activation='relu',
                   kernel_regularizer=regularizers.l2(0.01),
                   name='hl1')(net_input)
    score = Dense(1, activation='linear', name='score')(hidden)
    return Model(net_input, score)

def dev_network_linear(input_shape):
    '''
    Build the degenerate scoring network with no hidden layer: one linear
    unit, i.e. a direct linear mapping from raw inputs to anomaly scores.
    '''
    net_input = Input(shape=input_shape)
    score = Dense(1, activation='linear', name='score')(net_input)
    return Model(net_input, score)

def deviation_loss(y_true, y_pred):
    '''
    Z-score-based deviation loss (the right-hand part of the network
    diagram in the DevNet paper is implemented here).

    A fixed Gaussian sample of reference scores defines the "normal" score
    distribution; inliers (y_true == 0) are pulled towards its mean while
    outliers (y_true == 1) are pushed at least `confidence_margin`
    standard deviations above it.
    '''
    confidence_margin = 5.
    ## size = 5000 is the setting of l in algorithm 1 in the paper
    gaussian_ref = K.variable(np.random.normal(loc = 0., scale= 1.0, size = 5000) , dtype='float32')
    deviation = (y_pred - K.mean(gaussian_ref)) / K.std(gaussian_ref)
    loss_inlier = K.abs(deviation)
    loss_outlier = K.abs(K.maximum(confidence_margin - deviation, 0.))
    return K.mean((1 - y_true) * loss_inlier + y_true * loss_outlier)


def deviation_network(input_shape, network_depth):
    '''
    Construct and compile the deviation-network detection model.

    network_depth selects the architecture: 4 -> three hidden layers,
    2 -> one hidden layer, 1 -> linear model; any other value aborts.
    '''
    builders = {4: dev_network_d, 2: dev_network_s, 1: dev_network_linear}
    try:
        model = builders[network_depth](input_shape)
    except KeyError:
        sys.exit("The network depth is not set properly")

    # RMSprop with gradient-norm clipping as the optimizer.
    model.compile(loss=deviation_loss, optimizer=RMSprop(clipnorm=1.))
    return model


def batch_generator_sup(x, outlier_indices, inlier_indices, batch_size, nb_batch, rng):
    '''
    Infinite batch generator for fit_generator.

    A private RandomState is derived from `rng` so each generator instance
    owns a reproducible stream. Yields (batch, labels) pairs forever,
    dispatching on the global `data_format` flag (0 -> dense/csv batches,
    otherwise sparse/libsvm batches).
    '''
    local_rng = np.random.RandomState(rng.randint(MAX_INT, size = 1))
    served = 0
    while True:
        if data_format == 0:
            batch, labels = input_batch_generation_sup(
                x, outlier_indices, inlier_indices, batch_size, local_rng)
        else:
            batch, labels = input_batch_generation_sup_sparse(
                x, outlier_indices, inlier_indices, batch_size, local_rng)
        served += 1
        yield batch, labels
        if served > nb_batch:
            served = 0

'''
    Helper below builds balanced training batches from dense csv-format data.
'''
def input_batch_generation_sup(x_train, outlier_indices, inlier_indices, batch_size, rng):
    '''
    Generate one balanced training batch from dense (csv) data.

    Even batch positions receive a randomly chosen inlier (label 0) and odd
    positions a randomly chosen outlier (label 1), so the batch alternates
    normal/anomalous samples regardless of the class imbalance in x_train.
    '''
    n_features = x_train.shape[1]
    batch = np.empty((batch_size, n_features))
    labels = []
    n_inliers = len(inlier_indices)
    n_outliers = len(outlier_indices)
    for pos in range(batch_size):
        inlier_slot = (pos % 2 == 0)
        if inlier_slot:
            pick = rng.choice(n_inliers, 1)
            batch[pos] = x_train[inlier_indices[pick]]
        else:
            pick = rng.choice(n_outliers, 1)
            batch[pos] = x_train[outlier_indices[pick]]
        labels.append(0 if inlier_slot else 1)
    return np.array(batch), np.array(labels)

'''
    Helper below builds balanced training batches from sparse libsvm-format data.
'''
def input_batch_generation_sup_sparse(x_train, outlier_indices, inlier_indices, batch_size, rng):
    '''
    Generate one balanced training batch from libsvm-stored sparse data.

    Even batch positions receive a randomly chosen inlier (label 0) and odd
    positions a randomly chosen outlier (label 1). Row indices are gathered
    first, then the selected rows are densified in one fancy-indexing step.

    Bug fix: the index buffer is now an integer array. The original used
    np.empty(batch_size), which is float64; scipy rejects float arrays as
    sparse row indices, and assigning a shape-(1,) array into a scalar slot
    is deprecated in modern numpy.
    '''
    sample_idx = np.empty(batch_size, dtype=np.int64)
    training_labels = []
    n_inliers = len(inlier_indices)
    n_outliers = len(outlier_indices)
    for i in range(batch_size):
        if(i % 2 == 0):
            sid = rng.choice(n_inliers, 1)
            sample_idx[i] = inlier_indices[sid][0]
            training_labels += [0]
        else:
            sid = rng.choice(n_outliers, 1)
            sample_idx[i] = outlier_indices[sid][0]
            training_labels += [1]
    ref = x_train[sample_idx, :].toarray()
    return ref, np.array(training_labels)


def load_model_weight_predict(model_name, input_shape, network_depth, x_test):
    '''
    Load saved weights into a freshly built network and score x_test.

    For dense input (global data_format == 0) the whole test set is scored
    in one call; otherwise the sparse matrix is densified and scored in
    512-row windows to bound memory usage.
    '''
    model = deviation_network(input_shape, network_depth)
    model.load_weights(model_name)

    # Anomaly-scoring network: wraps the loaded model end-to-end.
    scoring_network = Model(inputs=model.input, outputs=model.output)

    if data_format == 0:
        scores = scoring_network.predict(x_test)
    else:
        data_size = x_test.shape[0]
        scores = np.zeros([data_size, 1])
        count = 512
        i = 0
        while i < data_size:
            # [i, count) is the current window; slicing clips at data_size,
            # and count is clamped below so the final window is partial.
            subset = x_test[i: count].toarray()
            scores[i: count] = scoring_network.predict(subset)
            if i % 1024 == 0:
                # Progress indicator every other window.
                print(i)
            i = count
            count += 512
            if count > data_size:
                count = data_size
        assert count == data_size
    return scores


def inject_noise_sparse(seed, n_out, random_seed):
    '''
    Create n_out synthetic contaminating anomalies from sparse seed anomalies,
    replicating an anomaly-contaminated data set.

    Each synthetic row copies one randomly chosen seed anomaly, then
    overwrites a random 5% of its features with the corresponding features
    of a second, distinct seed anomaly, so contaminating rows are never
    exact duplicates. Sparse (libsvm) counterpart of inject_noise.
    '''
    rng = np.random.RandomState(random_seed)
    n_sample, dim = seed.shape
    n_swap_feat = int(0.05 * dim)  # swap 5% of the features
    seed = seed.tocsc()
    noise = csc_matrix((n_out, dim))
    print(noise.shape)
    for row in np.arange(n_out):
        pair = rng.choice(n_sample, 2, replace = False)
        base = seed[pair[0]]
        donor = seed[pair[1]]
        swap_feats = rng.choice(dim, n_swap_feat, replace = False)
        noise[row] = base.copy()
        noise[row, swap_feats] = donor[0, swap_feats]
    return noise.tocsr()

def inject_noise(seed, n_out, random_seed):
    '''
    Create n_out synthetic contaminating anomalies from dense seed anomalies,
    replicating an anomaly-contaminated data set.

    Each synthetic row copies one randomly chosen seed anomaly, then
    overwrites a random 5% of its features with the corresponding features
    of a second, distinct seed anomaly, so contaminating rows are never
    exact duplicates. Dense (csv) counterpart of inject_noise_sparse.
    '''
    rng = np.random.RandomState(random_seed)
    n_sample, dim = seed.shape
    n_swap_feat = int(0.05 * dim)  # swap 5% of the features
    noise = np.empty((n_out, dim))
    for row in np.arange(n_out):
        pair = rng.choice(n_sample, 2, replace = False)
        base = seed[pair[0]]
        donor = seed[pair[1]]
        swap_feats = rng.choice(dim, n_swap_feat, replace = False)
        noise[row] = base.copy()
        noise[row, swap_feats] = donor[swap_feats]
    return noise

def run_devnet(args):
    """Run the full DevNet experiment for every dataset listed in args.data_set.

    For each dataset: load the data, split train/test, truncate the labeled
    outliers to args.known_outliers, inject an args.cont_rate fraction of
    unlabeled anomaly contamination, train the deviation network args.runs
    times and report mean/std AUC-ROC and AUC-PR plus timing via writeResults.
    """
    names = args.data_set.split(',')
    # names = ['annthyroid_21feat_normalised']
    network_depth = int(args.network_depth)
    random_seed = args.ramdn_seed
    for nm in names:
        runs = args.runs
        rauc = np.zeros(runs)
        ap = np.zeros(runs)
        filename = nm.strip()
        global data_format
        data_format = int(args.data_format)

        if data_format == 0:
            # dataLoading defined in utils.py file
            x, labels = dataLoading(args.input_path + filename + ".csv")
        else:
            x, labels = get_data_from_svmlight_file(args.input_path + filename + ".svm")
            # BUG FIX: the original called x.tcsr(), which is not a
            # scipy.sparse method and raised AttributeError for libsvm data.
            x = x.tocsr()

        outlier_indices = np.where(labels != 0)[0]
        outliers = x[outlier_indices]
        n_outliers_org = outliers.shape[0]

        # The (non-zero) label value marking outliers in this dataset.
        outlier_val = labels[outlier_indices[0]]

        train_time = 0
        test_time = 0
        for i in np.arange(runs):
            x_train, x_test, y_train, y_test = train_test_split(x,
                                                                labels,
                                                                test_size=0.2,
                                                                random_state=42,
                                                                stratify=labels)
            y_train = np.array(y_train)
            y_test = np.array(y_test)
            print(filename + ': round ' + str(i))
            outlier_indices = np.where(y_train != 0)[0]
            inlier_indices = np.where(y_train == 0)[0]
            n_outliers = len(outlier_indices)
            print("Original training size: %d, No. outliers: %d" % (x_train.shape[0], n_outliers))

            # cont_rate -> proportion of contaminating noise in the training data
            n_noise = len(np.where(y_train == 0)[0]) * args.cont_rate / (1. - args.cont_rate)
            n_noise = int(n_noise)

            rng = np.random.RandomState(random_seed)
            if data_format == 0:
                if n_outliers > args.known_outliers:
                    mn = n_outliers - args.known_outliers

                    # More labeled anomalies than the configured budget:
                    # randomly drop the surplus so the number of known
                    # outliers matches args.known_outliers.
                    remove_idx = rng.choice(outlier_indices, mn, replace=False)
                    x_train = np.delete(x_train, remove_idx, axis=0)
                    y_train = np.delete(y_train, remove_idx, axis=0)

                noises = inject_noise(outliers, n_noise, random_seed)
                x_train = np.append(x_train, noises, axis=0)

                # The injected contaminating anomalies are labeled 0 (normal).
                y_train = np.append(y_train, np.zeros((noises.shape[0], 1)))

            else:       # noise injection and outlier truncation for non-csv (sparse) data
                if n_outliers > args.known_outliers:
                    mn = n_outliers - args.known_outliers
                    remove_idx = rng.choice(outlier_indices, mn, replace=False)
                    retain_idx = set(np.arange(x_train.shape[0])) - set(remove_idx)
                    retain_idx = list(retain_idx)
                    x_train = x_train[retain_idx]
                    y_train = y_train[retain_idx]

                noises = inject_noise_sparse(outliers, n_noise, random_seed)
                x_train = vstack([x_train, noises])
                y_train = np.append(y_train, np.zeros((noises.shape[0], 1)))


            # Indices of labeled anomalies; the training set also contains
            # injected anomalies that carry a normal label (contamination).
            outlier_indices = np.where(y_train != 0)[0]
            # Indices of samples labeled normal.
            inlier_indices = np.where(y_train == 0)[0]

            print('y train sample number: ', y_train.shape[0],
                  ' labeled outlier number: ', outlier_indices.shape[0],
                  '\nnormal dataset sample number: ', inlier_indices.shape[0],
                  ' noise data rows number(not labeled but anomaly): ', n_noise)

            # Number of training samples actually fed to the network.
            n_samples_trn = x_train.shape[0]

            # Number of labeled anomalies after truncation.
            n_outliers = len(outlier_indices)
            print("Training data size: %d, No. outliers: %d" % (x_train.shape[0], n_outliers))


            start_time = time.time()
            input_shape = x_train.shape[1:]
            epochs = args.epochs
            batch_size = args.batch_size
            nb_batch = args.nb_batch
            model = deviation_network(input_shape, network_depth)
            print(model.summary())

            # model_name definition,
            # Here, the filename parameter is the data loaded name.
            model_name = "./model/devnet_"  + \
                         filename + "_" + \
                         str(args.cont_rate) + \
                         "cr_"  + str(args.batch_size) + \
                         "bs_" + str(args.known_outliers) + \
                         "ko_" + str(network_depth) + "d.h5"

            # verbose controls how training progress is shown on the console.
            checkpointer = ModelCheckpoint(model_name, monitor='loss', verbose=0,
                                           save_best_only = True, save_weights_only = True)

            model.fit_generator(batch_generator_sup(x_train, outlier_indices, inlier_indices, batch_size, nb_batch, rng),
                                          steps_per_epoch = nb_batch,
                                          epochs = epochs,
                                          callbacks=[checkpointer])
            train_time += time.time() - start_time

            # prediction begin time
            start_time = time.time()
            scores = load_model_weight_predict(model_name, input_shape,
                                               network_depth, x_test)
            # prediction end time
            test_time += time.time() - start_time

            # Evaluation metrics: AUC-ROC & AUC-PR.
            rauc[i], ap[i] = aucPerformance(scores, y_test, label_val=outlier_val)

        mean_auc = np.mean(rauc)
        std_auc = np.std(rauc)
        mean_aucpr = np.mean(ap)
        std_aucpr = np.std(ap)

        # Average the accumulated train/test times over all runs.
        train_time = train_time / runs
        test_time = test_time / runs
        print("average AUC-ROC for dataset file-%s: %.4f, average AUC-PR: %.4f" % (filename, mean_auc, mean_aucpr))
        print("average runtime for dataset file-%s: %.4f seconds" % (filename, train_time + test_time))

        # Header row for the results file (writeResults is a utils helper).
        # NOTE(review): this header is written once per dataset, so a
        # multi-dataset run repeats it — confirm writeResults handles that.
        writeResults('filename-networkDepth',
                     'origin-data-shape-0',
                     'origin-data-shape-1',
                     'sample_trained_number',
                     'outlier-number-of-origin-data',
                     'outlier-number-after-trancation',
                     'network-depth',
                     'mean-auc',
                     'mean-aucpr',
                     'standard-variance-of-auc',
                     'standard-variance-of-aucpr',
                     'train-time-sum',
                     'test-time-sum',
                     path=args.output)

        writeResults(filename + '_' + str(network_depth),
                     x.shape[0], x.shape[1],
                     n_samples_trn,
                     n_outliers_org,
                     n_outliers,
                     network_depth,
                     mean_auc,
                     mean_aucpr,
                     std_auc,
                     std_aucpr,
                     train_time,
                     test_time,
                     path=args.output)


parser = argparse.ArgumentParser()
parser.add_argument("--network_depth", choices=['1', '2', '4'], default='2',
                    help="the depth of the network architecture")

# Batch size for each SGD step.
# Total training samples: 512 * 20 = 10240 — NOTE(review): nb_batch
# defaults to 50 below, so this figure looks stale; confirm.
parser.add_argument("--batch_size", type=int, default=512,
                    help="batch size used in SGD")

# Number of batches per epoch.
parser.add_argument("--nb_batch", type=int, default=50,
                    help="the number of batches per epoch")

parser.add_argument("--epochs", type=int, default=100,
                    help="the number of epochs")

# runs -> how many times the experiment is repeated to average performance.
parser.add_argument("--runs", type=int, default=10,
                    help="how many times we repeat the experiments to obtain the average performance")

# Number of outlier labels assumed known. This may be smaller than the
# number of labeled anomalies actually present; surplus labels are
# randomly removed before training (see run_devnet).
parser.add_argument("--known_outliers", type=int, default=290,
                    help="the number of labeled outliers available at hand")

# Proportion of contaminating (noise) anomalies in the training data.
parser.add_argument("--cont_rate", type=float, default=0.02,
                    help="the outlier contamination rate in the training data")

# Input data path.
parser.add_argument("--input_path", type=str, default='./dataset/processed-railway-data/',
                    help="the path of the data sets")

# dataset names
parser.add_argument("--data_set", type=str, default='20180608-848-0-GJHS-396-400-processed-sawtooth_anomaly4,'
                                                    '20180608-848-0-GJHS-396-400-processed-sin-multiple_extension_anomaly3,'
                                                    '20180608-848-0-GJHS-396-400-processed-sin-anomaly2,'
                                                    '20180608-848-0-GJHS-396-400-processed-linear-anomaly1',
                    help="a list of data set names")

# parser.add_argument("--data_set", type=str, default='railway_detection_data',
#                     help="a list of data set names")
# parser.add_argument("--data_set", type=str, default='annthyroid_21feat_normalised',
#                     help="a list of data set names")

# 0 -> csv and 1 -> libsvm
parser.add_argument("--data_format", choices=['0','1'], default='0',
                    help="specify whether the input data is a csv (0) or libsvm (1) data format")

parser.add_argument("--output", type=str,
                    # default='./results/devnet_auc_performance_30outliers_0.02contrate_2depth_10runs.csv',
                    default='./railway/2020-4-23-devnet_railway_auc_performance_290outliers_0.02contrate_2depth_10runs.csv',
                    help="the output file path")

parser.add_argument("--ramdn_seed", type=int, default=42,
                    help="the random seed number")
args = parser.parse_args()
# run_devnet(args)


def main(args):
    """Entry point: run the DevNet experiments with parsed CLI arguments."""
    run_devnet(args)

if __name__ == '__main__':
    # Dispatch through main() — previously main() was defined but dead and
    # the guard called run_devnet directly.
    main(args)