# -*- coding: utf-8 -*-

import seaborn as sns
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import add,Input,Conv1D,Activation,Flatten,Dense,Dropout,LeakyReLU
import tensorflow as tf
import tensorflow_addons as tfa
import pandas as pd
from sklearn import preprocessing
# Load the raw 5-minute interval data and drop any rows with missing values.
data_o = pd.read_csv('../2017-2018_5min.csv').dropna()

# Work on the full frame (all rows, all columns).
chack_data = data_o.iloc[:, :]
#%%
# Data pre-processing:
# split into a training period (the first 105120 rows, i.e. one year of
# 5-minute samples: 365 * 24 * 12) and a test period (everything after).
dataset_train = chack_data.iloc[:105120, :]
dataset_test = chack_data.iloc[105120:, :]


scaler = preprocessing.MinMaxScaler()  # scale each column into the [0, 1] range

# Fit the scaler on the training period only, then apply the SAME per-column
# min/max to both sets so no information from the test period leaks into
# training.  (The original comment said "mean and standard deviation", but
# MinMaxScaler normalizes with the column min and max, not standardization.)
X_train = pd.DataFrame(scaler.fit_transform(dataset_train),
                       columns=dataset_train.columns,
                       index=dataset_train.index)

# NOTE: the original `X_train.sample(frac=1)` discarded its return value, so
# it never shuffled anything.  Shuffling is already handled by `shuffle=True`
# in `model.fit`, so the dead statement has been removed.

X_test = pd.DataFrame(scaler.transform(dataset_test),
                      columns=dataset_test.columns,
                      index=dataset_test.index)

#%%
# Build AutoEncoding model
def AutoEncoder_build(model, X_train, act_func):
    """Build and compile a dense 28-10-28 autoencoder.

    Parameters
    ----------
    model : unused
        Kept only for backward compatibility; the model is rebuilt from
        scratch with the functional API and this argument is ignored.
    X_train : DataFrame or 2-D array
        Training data; only ``X_train.shape[1]`` (the feature count) is used
        to size the input and output layers.
    act_func : str
        Activation function name for the hidden layers (e.g. 'elu').

    Returns
    -------
    tf.keras.Model
        A compiled autoencoder (mse loss, adam optimizer).
    """
    tf.random.set_seed(10)  # deterministic weight initialization

    n_features = X_train.shape[1]
    inputs = tf.keras.Input(shape=(n_features,))

    # Encoder: compress down to a 10-unit bottleneck.
    x = tf.keras.layers.Dense(28, activation=act_func,
                              kernel_initializer='glorot_uniform',
                              kernel_regularizer=tf.keras.regularizers.l2(0.0))(inputs)
    x = tf.keras.layers.Dense(10, activation=act_func,
                              kernel_initializer='glorot_uniform')(x)

    # Decoder: mirror the encoder back up to the input dimension.
    x = tf.keras.layers.Dense(28, activation=act_func,
                              kernel_initializer='glorot_uniform')(x)
    outputs = tf.keras.layers.Dense(n_features,
                                    kernel_initializer='glorot_uniform')(x)

    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.compile(loss='mse', optimizer='adam')

    # summary() prints itself; the original print(model.summary()) also
    # printed a stray "None".
    model.summary()

    try:
        # plot_model requires pydot + graphviz; a missing optional
        # dependency should not abort the whole training run.
        tf.keras.utils.plot_model(model, show_shapes=True)
    except Exception as exc:
        print(f'Skipping model plot: {exc}')

    return model

# AutoEncoder_build ignores its `model` argument (it rebuilds the network
# from scratch), so there is no need to construct a throwaway Sequential
# model first — pass None to make that explicit.
model = AutoEncoder_build(model=None, X_train=X_train, act_func='elu')
#%%
def AutoEncoder_main(model, Epochs, BATCH_SIZE, validation_split,
                     train_data=None, noise_factor=0.1):
    """Train the autoencoder as a denoiser: noisy input -> clean target.

    Parameters
    ----------
    model : compiled keras model (from AutoEncoder_build).
    Epochs : int
        Number of training epochs.
    BATCH_SIZE : int
        Mini-batch size.
    validation_split : float
        Fraction of the training data held out for validation.
    train_data : DataFrame or 2-D array, optional
        Clean training data.  Defaults to the module-level ``X_train`` for
        backward compatibility with the original global-based behavior.
    noise_factor : float, optional
        Scale of the Gaussian noise added to the inputs (default 0.1,
        matching the original hard-coded factor).

    Returns
    -------
    keras History object returned by ``model.fit``.
    """
    if train_data is None:
        train_data = X_train  # original behavior: read the global frame

    # Corrupt the inputs with Gaussian noise, then clip back into the
    # MinMax-scaled [0, 1] range so the noisy samples stay in-domain.
    noisy = train_data + noise_factor * np.random.normal(0, 1, train_data.shape)
    noisy = np.clip(noisy, 0., 1.)

    history = model.fit(np.array(noisy), np.array(train_data),
                        batch_size=BATCH_SIZE,
                        epochs=Epochs,
                        shuffle=True,
                        validation_split=validation_split,
                        verbose=1)

    return history

# Figure
def plot_AE_history(history):
    """Plot the training and validation loss curves from a keras History."""
    curves = (
        ('loss', 'b', 'Training loss'),
        ('val_loss', 'r', 'Validation loss'),
    )
    for key, colour, label in curves:
        plt.plot(history.history[key], colour, label=label)
    plt.legend(loc='upper right')
    plt.xlabel('Epochs')
    plt.ylabel('Loss, [mse]')
    plt.ylim([0, .1])
    plt.show()
    
# Train for 10 epochs (batch size 64, 5% of the training data held out for
# validation), then plot the loss curves.
history = AutoEncoder_main(model=model,Epochs=10,BATCH_SIZE=64,validation_split=0.05)

plot_AE_history(history)
#%%
# --- Anomaly scoring ---------------------------------------------------
# The per-row reconstruction error (mean absolute error between input and
# autoencoder output) is used as the anomaly score: rows the model cannot
# reconstruct well are flagged as anomalies.

def _reconstruction_score(model, frame):
    """Return the per-row MAE between `frame` and its reconstruction."""
    pred = pd.DataFrame(model.predict(np.array(frame)),
                        columns=frame.columns,
                        index=frame.index)
    return np.mean(np.abs(pred - frame), axis=1)

# Score the training period once.  (The original code ran model.predict on
# X_train twice — once for the distribution plot and again for the
# thresholded scores — a single pass is reused for both.)
train_loss = _reconstruction_score(model, X_train)

plt.figure()
# sns.distplot is deprecated (removed in seaborn >= 0.14); histplot with
# kde=True draws the same histogram plus density estimate.
sns.histplot(train_loss, bins=10, kde=True, color='blue')
plt.xlim([0.0, .1])

# Anomaly threshold, chosen manually from the training-loss distribution
# plotted above.
threshold = 0.076

scored = pd.DataFrame(index=X_test.index)
scored['Loss_mae'] = _reconstruction_score(model, X_test)
scored['Threshold'] = threshold
scored['Anomaly'] = scored['Loss_mae'] > scored['Threshold']
scored.head()

scored_train = pd.DataFrame(index=X_train.index)
scored_train['Loss_mae'] = train_loss
scored_train['Threshold'] = threshold
scored_train['Anomaly'] = scored_train['Loss_mae'] > scored_train['Threshold']

# Plot train + test scores on a log scale against the fixed threshold line.
scored = pd.concat([scored_train, scored])
scored.plot(logy=True, figsize=(10, 6), ylim=[1e-2, 1e2], color=['blue', 'red'])