'''
Copyright: 
Descripttion: 空洞卷积 自编码
version: 
Author: chengx
Date: 2022-02-11 16:58:32
LastEditors: chengx
LastEditTime: 2022-03-09 21:41:06
'''
from sklearn.metrics import roc_auc_score   
import sys
import torch.nn.functional as F
import seaborn as sns
import torch
import torch.nn as nn
from torch.utils.data import DataLoader,TensorDataset
import numpy as np  
import matplotlib.pyplot as plt 
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from collections import Counter
from sklearn.metrics import accuracy_score,f1_score

# Font settings shared by the matplotlib plot legends.
font1 = {'family': 'Times New Roman', 'size': 22}

def readData():
    """Load the averaged spectra, keep half the normals for training, and
    build a mixed test set.

    Returns the preprocessed train/test tensors, the test labels
    (0 = normal, 1 = anomaly), the training TensorDataset and DataLoader,
    and the raw held-out normal samples.
    """
    from scipy.signal import savgol_filter

    x = np.load('./data/AvrData.npy')
    y = np.load('./data/AvrLabel.npy')
    print(x.shape,y.shape)

    # First-derivative Savitzky-Golay filtering (window 9, polynomial order 2).
    x = savgol_filter(x, 9, 2, deriv=1)

    normal_mask = (y == 0)
    x_nom = x[normal_mask]      # label 0 -> normal
    x_abnom = x[~normal_mask]   # any non-zero label -> anomaly

    # Half of the normals train the autoencoder; the rest join the test set.
    X_train, X_nom_test = train_test_split(x_nom, train_size=0.5, random_state=5)
    X_test = np.concatenate([X_nom_test, x_abnom], axis=0)
    y_test = np.concatenate([np.zeros(len(X_nom_test)), np.ones(len(x_abnom))])

    print('X_train.shape, X_test.shape,y_test.shape',X_train.shape, X_test.shape, y_test.shape)

    x_train, x_test, train_set, train_loader = preProcess(X_train, X_test)
    return x_train, x_test, y_test, train_set, train_loader, X_nom_test

def preProcess(X_train, X_test):
    """Standardize both splits with train-set statistics and build the loaders.

    Returns (X_train, X_test) as float tensors of shape (N, features, 1),
    plus the training TensorDataset and a shuffling DataLoader (batch 10).
    """
    scaler = StandardScaler()
    # Fit on the training split only, then reuse those statistics on test.
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    # To tensors, with a trailing length-1 axis: (N, features) -> (N, features, 1).
    X_train = torch.FloatTensor(X_train).unsqueeze(-1)
    X_test = torch.FloatTensor(X_test).unsqueeze(-1)

    train_set = TensorDataset(X_train)
    train_loader = DataLoader(dataset=train_set, batch_size=10, shuffle=True)
    return X_train, X_test, train_set, train_loader

# Branch network: four parallel dilated-convolution paths.
class Inception(nn.Module):
    """Inception-style block of 1-D convolutions with dilations 1/2/5/12.

    Branch outputs are concatenated on the channel axis, then max-pooled
    ACROSS channels (via a permute), roughly halving the channel count.
    ``c1`` is the branch-1 width; ``c2``..``c4`` are (hidden, out) pairs.
    """

    def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
        super(Inception, self).__init__(**kwargs)
        # Branch 1: single standard convolution.
        self.p1_1 = nn.Conv1d(in_channels, c1, kernel_size=3, padding='same')
        # Branch 2: two stacked convolutions, dilation 2.
        self.p2_1 = nn.Conv1d(in_channels, c2[0], kernel_size=3, padding='same', dilation=2)
        self.p2_2 = nn.Conv1d(c2[0], c2[1], kernel_size=3, padding='same', dilation=2)
        # Branch 3: dilation 5.
        self.p3_1 = nn.Conv1d(in_channels, c3[0], kernel_size=3, padding='same', dilation=5)
        self.p3_2 = nn.Conv1d(c3[0], c3[1], kernel_size=3, padding='same', dilation=5)
        # Branch 4: dilation 12.
        self.p4_1 = nn.Conv1d(in_channels, c4[0], kernel_size=3, padding='same', dilation=12)
        self.p4_2 = nn.Conv1d(c4[0], c4[1], kernel_size=3, padding='same', dilation=12)
        # Pooling layers; pool2 is defined but not used in forward.
        self.pool1 = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        self.pool2 = nn.AvgPool1d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        branches = [
            F.elu(self.p1_1(x)),
            F.elu(self.p2_2(F.elu(self.p2_1(x)))),
            F.elu(self.p3_2(F.elu(self.p3_1(x)))),
            F.elu(self.p4_2(F.elu(self.p4_1(x)))),
        ]
        merged = torch.cat(branches, dim=1)
        # Pool over the channel axis: (N, C, L) -> (N, L, C), pool C, swap back.
        pooled = self.pool1(merged.permute(0, 2, 1))
        return pooled.permute(0, 2, 1)


def creatAE(input_size, loader=None, dataset=None, num_epochs=60):
    """Build and train the dilated-convolution autoencoder on normal samples.

    Parameters
    ----------
    input_size : int
        Number of input channels (features, after the (N, F, 1) reshape).
    loader : DataLoader, optional
        Training batches; defaults to the module-level ``train_loader``
        created in ``__main__`` (preserves the original behavior).
    dataset : Dataset, optional
        Training set, used only to average the epoch loss; defaults to the
        module-level ``train_set``.
    num_epochs : int, optional
        Number of training epochs (default 60, as before).

    Returns
    -------
    nn.Sequential
        The trained encoder+decoder model.
    """
    if loader is None:      # fall back to the globals built in __main__
        loader = train_loader
    if dataset is None:
        dataset = train_set

    # Encoder: Inception branches (-> 32 channels after channel pooling),
    # then a 1x1 bottleneck down to 8 channels.
    encoder = nn.Sequential(
        Inception(input_size, 16, (32, 16), (32, 16), (32, 16)),
        nn.Conv1d(32, 8, 1),
        nn.ELU(),
    )
    # Decoder: 1x1 convolutions expanding back to the input channel count.
    decoder = nn.Sequential(
        nn.Conv1d(8, 64, 1),
        nn.ELU(),
        nn.Conv1d(64, input_size, 1),
    )
    model = nn.Sequential(encoder, decoder)

    optimizer = torch.optim.Adam(model.parameters(), 0.01)
    loss_func = nn.MSELoss()  # reconstruction loss: output vs. original input

    # Train the autoencoder on the (normal-only) training data.
    for epoch in range(num_epochs):
        total_loss = 0.
        for (x,) in loader:
            x_recon = model(x)
            loss = loss_func(x_recon, x)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item() * len(x)
        total_loss /= len(dataset)

        # BUG FIX: report the epoch-average loss; the original computed
        # total_loss but printed only the last batch's loss.item().
        print('Epoch {}/{} : loss: {:.4f}'.format(
            epoch + 1, num_epochs, total_loss))

    return model

def standard(x):
    """Min-max normalize ``x`` in place to the range [0, 1] and return it.

    BUG FIX: the original recomputed ``np.min(x)`` inside a per-row loop
    while mutating ``x`` in place, so rows processed later were shifted by
    a minimum already distorted by earlier updates. The minimum and range
    are now taken once, up front, and applied to the whole array.
    """
    mn = float(np.min(x))
    fm = np.max(x) - np.min(x)  # value range, as in the original
    x[:] = (x - mn) / fm
    return x

def detec(model, X_train, X_test, y_test):
    """Score samples by reconstruction error and return the test-set errors.

    The error is the mean squared difference between the model's output and
    its input, averaged over dim 1 (features), flattened to one value per
    sample. ``y_test`` is only used by the (disabled) ``draw`` helper.
    """
    def get_recon_err(X):
        # Per-sample MSE between reconstruction and input.
        return torch.mean((model(X) - X) ** 2, dim=1).detach().numpy()

    recon_err_test = get_recon_err(X_test).reshape(-1)
    recon_err_train = get_recon_err(X_train).reshape(-1)

    def draw():
        # Kernel-density plot of the error distributions (normal vs. anomaly).
        recon_err = np.concatenate([recon_err_train, recon_err_test])
        labels = np.concatenate([np.zeros(len(recon_err_train)), y_test])

        plt.figure(figsize=(9, 6))
        sns.kdeplot(recon_err[labels == 0], shade=True, label='Normal')
        sns.kdeplot(recon_err[labels == 1], shade=True, label='Anormaly')

        plt.tick_params(labelsize=22)  # axis tick font size
        plt.legend(loc=0, prop=font1)  # best-corner legend, shared font settings
        plt.show()
    # draw()  # disabled: uncomment to visualize the error distributions

    return recon_err_test


def mm(y_pred, y_true, X_nom_test):
    """Threshold the reconstruction errors and report AUC/precision/recall/F1.

    Strategy 1: rank the errors ascending and declare the lowest
    ``len(X_nom_test)`` samples normal; everything ranked above that
    cut-off is flagged as an anomaly.

    Parameters
    ----------
    y_pred : np.ndarray
        Per-sample reconstruction errors (higher = more anomalous).
    y_true : np.ndarray
        Ground-truth labels (0 = normal, 1 = anomaly).
    X_nom_test : np.ndarray
        Held-out normal samples; only its length is used, as the cut-off rank.

    Returns
    -------
    tuple
        (auc, precision, recall, f1)
    """
    # Strategy 1: samples ranked beyond the normal-count cut-off -> anomaly.
    # (decision starts at zero, so the "normal" half needs no assignment.)
    decision = np.zeros(y_pred.shape[0])
    order = np.argsort(y_pred)
    decision[order[X_nom_test.shape[0]:]] = 1

    # Strategy 2 (not implemented): threshold at the 95th percentile of the
    # normal-sample errors.

    # BUG FIX: all metrics now use the y_true argument; the original read the
    # module-level y_test and silently ignored its own parameter.
    auc_score = roc_auc_score(y_true, decision)
    print('auc',auc_score)

    tn, fp, fn, tp = confusion_matrix(y_true, decision).ravel()
    print('tn,fp,fn,tp',tn,fp,fn,tp)

    # Guard against zero denominators with the smallest float epsilon.
    prec = tp / np.maximum(tp + fp, sys.float_info.epsilon)
    recall = tp / np.maximum(tp + fn, sys.float_info.epsilon)
    f1 = 2.0 * prec * recall / np.maximum(prec + recall, sys.float_info.epsilon)
    print('prec:',prec)
    print('recall:',recall)
    print('f1:',f1)
    return auc_score, prec, recall, f1


if __name__ == '__main__':
    # Run the full experiment 10 times and collect each run's metrics.
    # NOTE: y_test / train_set / train_loader must keep these names — creatAE
    # and mm read them from module scope.
    scores = {'auc': [], 'precision': [], 'recall': [], 'f1-score': []}
    for _ in range(10):
        x_train, x_test, y_test, train_set, train_loader, X_nom_test = readData()
        input_size = x_train.shape[1]

        model = creatAE(input_size)
        y_pred = detec(model, x_train, x_test, y_test)

        auc_score, prec, recall, f1 = mm(y_pred, y_test, X_nom_test)
        scores['auc'].append(auc_score)
        scores['precision'].append(prec)
        scores['recall'].append(recall)
        scores['f1-score'].append(f1)

    # Dicts preserve insertion order, so metrics print in the original order.
    for metric, values in scores.items():
        print(metric, list(np.round(values, 3)))
