'''
Copyright: 
Description: convolutional autoencoder (卷积自编码) for anomaly detection
version: 
Author: chengx
Date: 2022-02-11 15:27:39
LastEditors: chengx
LastEditTime: 2022-03-12 20:11:12
'''
import torch
import numpy as np
import sys
import seaborn as sns
from torch import nn
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader,TensorDataset
from sklearn.preprocessing import StandardScaler
from collections import Counter
from torch.utils.data import DataLoader
import torch.optim as optim
import matplotlib.pyplot as plt 
from sklearn.metrics import accuracy_score,f1_score
from sklearn.metrics import roc_auc_score   
from sklearn.metrics import confusion_matrix


# Shared matplotlib font settings used for legend and annotation text.
font1 = {
    'family': 'Times New Roman',
    'size': 24,
}

def readData():
    """Load the dataset, build a train split of normals only, and a mixed test split.

    Label 0 is treated as the normal class; every other label is anomalous.
    The autoencoder is trained on half the normal samples; the test set is the
    remaining normals plus all anomalies, labeled 0 (normal) / 1 (anomaly).

    Returns:
        X_train: standardized train tensor, shape (N, 1, D)
        train_loader: DataLoader over X_train
        X_test: standardized test tensor, shape (M, 1, D)
        y_test: binary numpy labels for X_test (0 = normal, 1 = anomaly)
    """
    x = np.load('./data/AvrData.npy')
    y = np.load('./data/AvrLabel.npy')
    print('a', Counter(y))
    print(x.shape, y.shape)

    # Split by class: non-zero labels are anomalies.
    x_abnom = x[~(y == 0)]
    x_nom = x[y == 0]

    # Train on half of the normal data; test on the rest plus all anomalies.
    X_train, X_nom_test = train_test_split(x_nom, train_size=0.5, random_state=1)
    X_test = np.concatenate([X_nom_test, x_abnom], axis=0)
    y_test = np.concatenate([np.zeros(len(X_nom_test)), np.ones(len(x_abnom))])

    # (Removed dead code: a y_M label vector was computed here but never used.)

    print('X_train.shape, X_test.shape,y_test.shape', X_train.shape, X_test.shape, y_test.shape)

    X_train, train_loader, X_test = preProcess(X_train, X_test)

    return X_train, train_loader, X_test, y_test

def preProcess(X_train, X_test):
    """Standardize both splits with train-set statistics and wrap them for PyTorch.

    Returns (train_tensor, train_loader, test_tensor) where tensors have shape
    (N, 1, D) — a single channel dimension inserted for Conv1d.
    """
    scaler = StandardScaler()  # zero-mean / unit-variance per feature
    X_train = scaler.fit_transform(X_train)  # fit statistics on train only
    X_test = scaler.transform(X_test)        # apply the same statistics to test

    train_t = torch.FloatTensor(X_train)
    test_t = torch.FloatTensor(X_test)

    # (N, D) -> (N, 1, D): Conv1d expects a channel axis.
    train_t = train_t.reshape(train_t.shape[0], 1, train_t.shape[1])
    test_t = test_t.reshape(test_t.shape[0], 1, test_t.shape[1])

    loader = DataLoader(dataset=TensorDataset(train_t), batch_size=10, shuffle=True)

    return train_t, loader, test_t

class Convolutional_Encoder(nn.Module):
    """1-D convolutional autoencoder.

    Encoder: three conv + leaky-ReLU + average-pool stages, each pool halving
    the sequence length (so the input length should be divisible by 8).
    Decoder: three convolutions that shrink channels 16 -> 12 -> 8, then the
    (N, 8, L/8) activation is flattened back to a (N, 1, L) sequence so the
    output can be compared with the input under an MSE reconstruction loss.
    """

    def __init__(self):
        super(Convolutional_Encoder, self).__init__()
        # ---- Encoder ----
        # padding='same' preserves length; dilation widens the receptive field
        # without adding parameters.
        self.conv_1 = nn.Conv1d(in_channels=1, out_channels=16, kernel_size=3, padding='same', dilation=5)
        self.conv_2 = nn.Conv1d(16, 16, 3, padding='same', dilation=2)
        self.conv_3 = nn.Conv1d(16, 16, 3, padding='same')
        self.AP = nn.AvgPool1d(2, 2)
        # ---- Decoder ----
        self.conv_4 = nn.Conv1d(16, 12, kernel_size=3, stride=1, padding='same')
        self.conv_5 = nn.Conv1d(12, 8, kernel_size=3, stride=1, padding='same')

    def forward(self, x):
        # Encoder: conv -> leaky_relu -> avg-pool, three times.
        for conv in (self.conv_1, self.conv_2, self.conv_3):
            x = self.AP(F.leaky_relu(conv(x)))

        # Decoder.
        # NOTE(review): conv_3 is applied a second time here, sharing weights
        # with the last encoder stage — looks intentional, but confirm it is
        # not meant to be a separate layer.
        x = F.leaky_relu(self.conv_3(x))
        x = F.leaky_relu(self.conv_4(x))
        x = self.conv_5(x)

        # Flatten channels into one sequence: (N, 8, L/8) -> (N, 1, L).
        return x.view(-1, 1, x.shape[2] * x.shape[1])


def train(train_loader, num_epochs=100, lr=0.01):
    """Train the autoencoder on (normal-only) data and return the fitted model.

    Args:
        train_loader: DataLoader yielding 1-tuples of (batch, 1, D) tensors.
        num_epochs: number of passes over the data (default 100, as before).
        lr: Adam learning rate (default 0.01, as before).

    Returns:
        The trained Convolutional_Encoder (left on the training device).
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = Convolutional_Encoder().to(device)
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr)
    loss_fn = nn.MSELoss()  # reconstruction error: output should match input

    # Train the autoencoder on normal data only.
    for epoch in range(num_epochs):
        for (x,) in train_loader:
            # BUG FIX: the batch must live on the same device as the model;
            # the original left x on the CPU and crashed on CUDA machines.
            x = x.to(device)
            x_recon = model(x)
            loss = loss_fn(x_recon, x)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Reports the loss of the last batch of the epoch.
        print('Epoch {}/{} : loss: {:.4f}'.format(
            epoch + 1, num_epochs, loss.item()))

    return model


def detec(model, X_train, X_test, y_test):
    """Compute per-sample reconstruction error for the train and test sets.

    Args:
        model: trained autoencoder (anything callable with .eval()).
        X_train, X_test: tensors of shape (N, 1, D).
        y_test: unused; kept for backward compatibility with existing callers.

    Returns:
        (recon_err_train, recon_err_test): 1-D numpy arrays of per-sample
        mean squared reconstruction error.
    """
    model.eval()  # disable BatchNorm/Dropout training behavior

    def get_recon_err(X):
        # Mean squared error between the reconstruction and the input,
        # averaged over all features of each sample.
        with torch.no_grad():  # inference only — no gradient tracking needed
            recon = model(X)
        recon = recon.reshape(recon.shape[0], -1)
        flat = X.reshape(recon.shape[0], -1)
        # .cpu() makes this safe even when the model ran on CUDA
        # (the original .detach().numpy() fails on GPU tensors).
        return torch.mean((recon - flat) ** 2, dim=1).cpu().numpy()

    recon_err_test = get_recon_err(X_test)
    recon_err_train = get_recon_err(X_train).reshape(-1)
    return recon_err_train, recon_err_test


def drawRconError(recon_err_train, recon_err_test, t, test_labels=None):
    """Plot the reconstruction-error densities of normal vs. anomalous samples.

    Draws a KDE for each class with the chosen threshold t as a dashed line.

    Args:
        recon_err_train: errors of (all-normal) training samples.
        recon_err_test: errors of the mixed test samples.
        t: decision threshold to annotate.
        test_labels: binary labels for recon_err_test. BUG FIX: the original
            silently read the module-level global ``y_test``; that global is
            still used as a fallback when this argument is omitted, so
            existing 3-argument callers keep working.

    Returns:
        recon_err_test, unchanged (kept for interface compatibility).
    """
    if test_labels is None:
        test_labels = y_test  # backward-compatible fallback to the global

    recon_err = np.concatenate([recon_err_train, recon_err_test])
    # Training samples are all normal (label 0).
    labels = np.concatenate([np.zeros(len(recon_err_train)), test_labels])

    fig = plt.figure(figsize=(7, 5))
    sns.kdeplot(recon_err[labels == 0], shade=True, label='Normal')
    sns.kdeplot(recon_err[labels == 1], shade=True, label='Anormaly')
    plt.vlines(t, 0, 3, linestyles='dashed', colors='red')

    t = round(t, 2)
    plt.text(t, 3, 't =' + str(t), font1)

    plt.tick_params(labelsize=22)  # axis tick font size
    plt.legend(loc=0, prop=font1)  # auto-placed legend with the shared font
    plt.show()

    return recon_err_test


def getThreshold(y_pred, y_test):
    """Sweep thresholds over [0, 2], pick the most accurate one, report metrics.

    A sample is flagged anomalous when its reconstruction error exceeds the
    threshold. The threshold maximizing accuracy on y_test is selected
    (effectively a prior on t), then AUC / precision / recall / F1 are
    computed and printed for that decision.

    Args:
        y_pred: 1-D array of reconstruction errors for the test set.
        y_test: binary ground-truth labels (0 = normal, 1 = anomaly).

    Returns:
        (auc_score, prec, recall, f1, t_best)
    """
    threshold = np.linspace(0, 2, 100)
    acc_list = []
    f1_list = []
    t_best = 0
    accMax = 0
    for t in threshold:
        # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
        _y_pred = (y_pred > t).astype(int)
        acc = accuracy_score(_y_pred, y_test)  # hoisted: was computed 3 times
        acc_list.append(acc)
        f1_list.append(f1_score(_y_pred, y_test))
        # Track the best-accuracy threshold.
        if accMax < acc:
            accMax = acc
            t_best = t

    # Final decision using the selected threshold.
    decision = (y_pred > t_best).astype(int).reshape(-1)

    auc_score = roc_auc_score(y_test, decision)
    print('auc', auc_score)

    tn, fp, fn, tp = confusion_matrix(y_test, decision).ravel()
    print('tn,fp,fn,tp', tn, fp, fn, tp)

    # epsilon guards the divisions when a class is empty
    prec = tp / np.maximum(tp + fp, sys.float_info.epsilon)
    recall = tp / np.maximum(tp + fn, sys.float_info.epsilon)
    f1 = 2.0 * prec * recall / np.maximum(prec + recall, sys.float_info.epsilon)
    print('prec:', prec)
    print('recall:', recall)
    print('f1:', f1)
    return auc_score, prec, recall, f1, t_best

if __name__ == "__main__":
    # Accumulate metrics over repeated runs (currently a single run).
    auc_scores, precisions, recalls, f1_scores = [], [], [], []
    for _ in range(1):
        # NOTE: y_test must keep this module-level name — drawRconError
        # reads it as a global.
        X_train, train_loader, x_test, y_test = readData()
        model = train(train_loader)

        recon_err_train, recon_err_test = detec(model, X_train, x_test, y_test)
        auc_score, prec, recall, f1, t_best = getThreshold(recon_err_test, y_test)
        drawRconError(recon_err_train, recon_err_test, t_best)

        auc_scores.append(auc_score)
        precisions.append(prec)
        recalls.append(recall)
        f1_scores.append(f1)

    print('auc', list(np.round(auc_scores, 3)))
    print('precision', list(np.round(precisions, 3)))
    print('recall', list(np.round(recalls, 3)))
    print('f1-score', list(np.round(f1_scores, 3)))


