'''
Copyright: 
Description: 空洞卷积 自编码
version: 
Author: chengx
Date: 2022-02-11 16:58:32
LastEditors: chengx
LastEditTime: 2022-03-12 19:38:02
'''
from sklearn.metrics import roc_auc_score   
import sys
import seaborn as sns
import torch
import torch.nn as nn
from torch.utils.data import DataLoader,TensorDataset
import numpy as np  
import matplotlib.pyplot as plt 
from sklearn.preprocessing import (FunctionTransformer, StandardScaler)
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from collections import Counter
from sklearn.metrics import accuracy_score,f1_score

# Font settings shared by the matplotlib text and legend calls below.
font1 = {'family': 'Times New Roman', 'size': 22}

def readData():
    """Load the AVR dataset and split it for anomaly detection.

    Normal samples (label 0) are split 50/50: half train the autoencoder,
    half form the normal part of the test set.  All abnormal samples
    (label != 0) go into the test set.

    Returns:
        x_train, x_test: standardized tensors of shape (N, 1, D)
        y_test: binary labels for x_test (0 = normal, 1 = anomaly)
        train_set, train_loader: torch dataset/loader over x_train
        X_nom_test: raw (unscaled) normal test samples
        y_M: multi-class labels aligned with x_test (0 for normals,
             original class label for anomalies) for a manual
             confusion matrix
    """
    x = np.load('./data/AvrData.npy')
    y = np.load('./data/AvrLabel.npy')

    print(x.shape, y.shape)
    # Removed dead `import scipy.signal`: its only use (a Savitzky-Golay
    # first-derivative filter) was commented out.

    x_abnom = x[~(y == 0)]  # non-zero label => anomaly
    x_nom = x[y == 0]

    # NOTE(review): random_state is fixed, so every run of the outer
    # 10-repeat loop uses the same split; only model init varies.
    X_train, X_nom_test = train_test_split(x_nom, train_size=0.5, random_state=1)
    X_test = np.concatenate([X_nom_test, x_abnom], axis=0)
    y_test = np.concatenate([np.zeros(len(X_nom_test)), np.ones(len(x_abnom))])

    # Multi-class labels for the manual confusion matrix: zeros for the
    # normal test half, then the original anomaly class labels.
    y_M = np.delete(y, np.argwhere(y == 0), axis=0)
    y_M = np.concatenate([np.zeros(len(X_nom_test)), y_M])

    print('X_train.shape, X_test.shape,y_test.shape', X_train.shape, X_test.shape, y_test.shape)

    x_train, x_test, train_set, train_loader = preProcess(X_train, X_test)

    return x_train, x_test, y_test, train_set, train_loader, X_nom_test, y_M

def preProcess(X_train, X_test):
    """Standardize the data, convert to tensors and build a train loader.

    Scaling statistics are fitted on the training set only and then
    applied to the test set, so no test information leaks into training.

    Returns:
        (train_tensor, test_tensor, train_set, train_loader) where the
        tensors have shape (N, 1, D) as expected by Conv1d.
    """
    scaler = StandardScaler()  # zero-mean / unit-variance per feature
    train_scaled = scaler.fit_transform(X_train)
    test_scaled = scaler.transform(X_test)

    # (N, D) -> (N, 1, D): insert the single-channel axis for Conv1d.
    train_tensor = torch.FloatTensor(train_scaled).unsqueeze(1)
    test_tensor = torch.FloatTensor(test_scaled).unsqueeze(1)

    train_set = TensorDataset(train_tensor)
    train_loader = DataLoader(dataset=train_set, batch_size=10, shuffle=True)

    return train_tensor, test_tensor, train_set, train_loader

def creatAE():
    """Build and train a 1-D convolutional autoencoder on normal data.

    Reads the module-level globals ``train_loader`` and ``train_set``
    produced by ``readData``/``preProcess``; trains for 30 epochs with
    Adam + MSE reconstruction loss.

    Returns:
        The trained ``nn.Sequential`` autoencoder.
    """
    model = nn.Sequential(
        # encoder
        nn.Conv1d(1, 16, 5),
        nn.ELU(),
        nn.MaxPool1d(2, stride=2),
        nn.Conv1d(16, 16, 3),
        nn.ELU(),
        nn.MaxPool1d(2, stride=2),

        nn.Conv1d(16, 16, 1),  # 1x1 conv: channel mixing at the bottleneck
        # decoder
        nn.Upsample(scale_factor=2, mode='nearest'),
        nn.ConvTranspose1d(16, 16, 3),
        nn.ELU(),
        nn.Upsample(scale_factor=2, mode='nearest'),
        nn.ConvTranspose1d(16, 1, 5),
        nn.ELU(),
    )

    num_epochs = 30
    optimizer = torch.optim.Adam(model.parameters(), 0.001)
    loss_func = nn.MSELoss()  # reconstruction: output should match input
    # Train the autoencoder on normal samples only.
    for epoch in range(num_epochs):
        total_loss = 0.
        for step, (x,) in enumerate(train_loader):
            x_recon = model(x)
            loss = loss_func(x_recon, x)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item() * len(x)
        total_loss /= len(train_set)

        # BUG FIX: report the sample-weighted epoch average (total_loss),
        # which was computed but never used — the original printed only
        # the last mini-batch's loss.
        print('Epoch {}/{} : loss: {:.4f}'.format(
            epoch + 1, num_epochs, total_loss))

    return model

def detec(model, X_train, X_test, y_test):
    """Compute per-sample reconstruction errors for train and test sets.

    The reconstruction error is the mean squared difference between the
    model output and its input, one scalar per sample.

    Args:
        model: trained autoencoder mapping (N, 1, D) -> (N, 1, D).
        X_train, X_test: float tensors of shape (N, 1, D).
        y_test: unused; kept for interface compatibility with callers.

    Returns:
        (recon_err_train, recon_err_test) as 1-D numpy arrays.
    """
    def get_recon_err(X):
        # Inference only: no_grad avoids building the autograd graph,
        # saving memory without changing the result.
        with torch.no_grad():
            MX = model(X)
        X = X.reshape(X.shape[0], -1)
        MX = MX.reshape(MX.shape[0], -1)
        print(MX.shape, X.shape)
        return torch.mean((MX - X) ** 2, dim=1).numpy()

    recon_err_test = get_recon_err(X_test)
    recon_err_train = get_recon_err(X_train)

    recon_err_train = recon_err_train.reshape(-1)
    recon_err_test = recon_err_test.reshape(-1)
    print(recon_err_test)
    return recon_err_train, recon_err_test

def draw(recon_err_train, recon_err_test, t):
    """Plot density curves of the reconstruction errors with threshold t.

    Relies on the module-level global ``y_test`` to split the test errors
    into normal and anomalous groups; training errors are all normal.
    """
    errors = np.concatenate([recon_err_train, recon_err_test])
    labels = np.concatenate([np.zeros(len(recon_err_train)), y_test])

    plt.figure(figsize=(7, 5))

    sns.kdeplot(errors[labels == 0], shade=True, label='Normal')
    sns.kdeplot(errors[labels == 1], shade=True, label='Anormaly')

    # Mark the decision threshold as a dashed vertical line, annotated
    # with its (rounded) value.
    plt.vlines(t, 0, 8, linestyles='dashed', colors='red')
    t = round(t, 2)
    plt.text(t, 8, 't =' + str(t), font1)

    plt.tick_params(labelsize=22)  # axis tick font size
    plt.legend(loc=0, prop=font1)  # best-position legend with custom font
    plt.show()


def mm(y_pred):
    """Pick the accuracy-maximizing threshold and report test metrics.

    Sweeps 100 thresholds over [0, 2] and keeps the one with the highest
    accuracy against the module-level global ``y_test`` — i.e. the
    threshold is chosen with label knowledge (an oracle-style evaluation),
    then AUC / precision / recall / F1 are reported at that threshold.

    Args:
        y_pred: 1-D array of anomaly scores (reconstruction errors).

    Returns:
        (auc_score, prec, recall, f1, t_best)
    """
    threshold = np.linspace(0, 2, 100)
    acc_list = []
    f1_list = []
    t_best = 0
    accMax = 0
    for t in threshold:
        # BUG FIX: np.int was removed in NumPy 1.24 — use the builtin int.
        _y_pred = (y_pred > t).astype(int)
        acc = accuracy_score(_y_pred, y_test)  # compute once per threshold
        acc_list.append(acc)
        f1_list.append(f1_score(_y_pred, y_test))
        # Track the best (accuracy-maximizing) threshold.
        if accMax < acc:
            accMax = acc
            t_best = t

    # Final decision using the selected threshold.
    decision = (y_pred > t_best).astype(int)

    auc_score = roc_auc_score(y_test, decision)
    print('auc', auc_score)

    tn, fp, fn, tp = confusion_matrix(y_test, decision).ravel()
    print('tn,fp,fn,tp', tn, fp, fn, tp)

    # Clamp denominators with machine epsilon to avoid division by zero.
    prec = tp / np.maximum(tp + fp, sys.float_info.epsilon)
    recall = tp / np.maximum(tp + fn, sys.float_info.epsilon)
    f1 = 2.0 * prec * recall / np.maximum(prec + recall, sys.float_info.epsilon)
    print('prec:', prec)
    print('recall:', recall)
    print('f1:', f1)
    return auc_score, prec, recall, f1, t_best


if __name__ == '__main__':
    # Run the whole pipeline 10 times and collect each run's metrics.
    # NOTE: y_test / train_set / train_loader must stay module-level
    # globals — creatAE, mm and draw read them by name.
    AC, PC, RC, FC = [], [], [], []
    for run in range(10):
        (x_train, x_test, y_test, train_set,
         train_loader, X_nom_test, y_M) = readData()

        model = creatAE()
        recon_err_train, recon_err_test = detec(model, x_train, x_test, y_test)
        auc_score, prec, recall, f1, t_best = mm(recon_err_test)
        draw(recon_err_train, recon_err_test, t_best)

        for store, value in ((AC, auc_score), (PC, prec), (RC, recall), (FC, f1)):
            store.append(value)

    for title, values in (('auc', AC), ('precision', PC),
                          ('recall', RC), ('f1-score', FC)):
        print(title, list(np.round(values, 3)))