'''
Copyright: 
Description: 
version: 
Author: chengx
Date: 2021-10-14 10:02:11
LastEditors: chengx
LastEditTime: 2022-03-04 16:18:18
'''
from cProfile import label
from itertools import count
import sys
import seaborn as sns
import torch
import torch.nn as nn
from torch.utils.data import DataLoader,TensorDataset
import numpy as np  
import matplotlib.pyplot as plt 
from sklearn.base import TransformerMixin # To create new classes for transformations
from sklearn.preprocessing import (FunctionTransformer, StandardScaler)

from sklearn.model_selection import train_test_split
from collections import Counter


def readData():
    """Load the averaged dataset and split it for anomaly detection.

    Normal samples (label 0) are split 50/50 into a training half and a test
    half; every anomalous sample (label != 0) goes into the test set with
    binary label 1.

    Returns:
        (x_train, x_test, y_test, train_set, train_loader, X_nom_test, y_M)
        where x_train/x_test are standardized FloatTensors, y_test is the
        binary test label array, and y_M keeps the original multi-class
        anomaly labels aligned with x_test (for a manual confusion matrix).
    """
    features = np.load('./data/AvrData.npy')
    raw_labels = np.load('./data/AvrLabel.npy')
    print('样本分布', Counter(raw_labels))

    # Label 0 marks normal samples; any other label is an anomaly.
    x_abnormal = features[~(raw_labels == 0)]
    x_normal = features[raw_labels == 0]

    # Half of the normal data trains the autoencoder; the rest joins the test set.
    X_train, X_nom_test = train_test_split(x_normal, train_size=0.5, random_state=1)
    X_test = np.concatenate([X_nom_test, x_abnormal], axis=0)
    # Binary test labels: 0 = normal, 1 = anomaly.
    y_test = np.concatenate([np.zeros(len(X_nom_test)), np.ones(len(x_abnormal))])

    # Multi-class labels aligned with X_test (zeros for the normal half,
    # original anomaly class ids for the rest) — used for a manual confusion matrix.
    y_M = np.delete(raw_labels, np.argwhere(raw_labels == 0), axis=0)
    y_M = np.concatenate([np.zeros(len(X_nom_test)), y_M])

    print('X_train.shape, X_test.shape,y_test.shape', X_train.shape, X_test.shape, y_test.shape)

    x_train, x_test, train_set, train_loader = preProcess(X_train, X_test)

    return x_train, x_test, y_test, train_set, train_loader, X_nom_test, y_M

def preProcess(X_train, X_test, batch_size=10, shuffle=True):  # 数据预处理
    """Standardize the data, convert it to tensors and build a DataLoader.

    The StandardScaler (zero mean, unit variance) is fitted on the training
    set ONLY; the test set is transformed with the same statistics so no
    test-set information leaks into training.

    Args:
        X_train: 2-D array of training samples.
        X_test: 2-D array of test samples.
        batch_size: mini-batch size for the returned DataLoader
            (default 10, matching the previously hard-coded value).
        shuffle: whether the DataLoader reshuffles every epoch (default True,
            matching the previous behavior).

    Returns:
        (X_train, X_test, train_set, train_loader): the first two as
        FloatTensors, train_set a TensorDataset over X_train, and
        train_loader iterating over train_set.
    """
    sc = StandardScaler()  # zero-mean / unit-variance normalization
    # Fit on the training split, then reuse the same statistics on the test split.
    X_train = sc.fit_transform(X_train)
    X_test = sc.transform(X_test)

    # Convert to float32 tensors for the torch model.
    X_train, X_test = torch.FloatTensor(X_train), torch.FloatTensor(X_test)

    train_set = TensorDataset(X_train)
    train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=shuffle)

    return X_train, X_test, train_set, train_loader


def creatAE(input_size, loader=None, dataset=None, num_epochs=100, lr=0.001):  # 搭建模型
    """Build and train a fully-connected autoencoder on normal data only.

    Architecture: input_size -> 6 -> 3 -> 6 -> input_size with ELU activations.

    Args:
        input_size: number of input features.
        loader: DataLoader yielding (x,) batches; defaults to the module-level
            ``train_loader`` global (preserving the original implicit dependency).
        dataset: the dataset behind ``loader``, used only for its length when
            averaging the epoch loss; defaults to the ``train_set`` global.
        num_epochs: number of training epochs (default 100, as before).
        lr: Adam learning rate (default 0.001, as before).

    Returns:
        The trained nn.Sequential model.
    """
    if loader is None:
        loader = train_loader  # module global produced by readData()
    if dataset is None:
        dataset = train_set  # module global produced by readData()

    model = nn.Sequential(
        nn.Linear(input_size, 6),
        nn.ELU(),
        nn.Linear(6, 3),
        nn.ELU(),
        nn.Linear(3, 6),
        nn.ELU(),
        nn.Linear(6, input_size)
    )

    optimizer = torch.optim.Adam(model.parameters(), lr)
    loss_func = nn.MSELoss()  # 均方误差，希望解码出来的值和原值越接近越好
    # Train the autoencoder on normal data only.
    for epoch in range(num_epochs):
        total_loss = 0.
        for step, (x,) in enumerate(loader):
            x_recon = model(x)
            loss = loss_func(x_recon, x)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item() * len(x)
        total_loss /= len(dataset)  # sample-weighted mean loss for the epoch

        # Bug fix: report the epoch-average loss. The original computed
        # total_loss but printed only the last mini-batch's loss.item().
        print('Epoch {}/{} : loss: {:.4f}'.format(
            epoch + 1, num_epochs, total_loss))

    return model

def detec(X_train, X_test, y_test, net=None):
    """Score samples by reconstruction error, plot the error densities and
    print the ROC-AUC of the test-set errors used as anomaly scores.

    Args:
        X_train: FloatTensor of (standardized) training samples.
        X_test: FloatTensor of test samples (normal + anomalous).
        y_test: binary labels for X_test (0 normal, 1 anomaly).
        net: trained autoencoder; defaults to the module-level ``model``
            global (preserving the original implicit dependency).

    Returns:
        1-D numpy array of per-sample reconstruction errors for X_test.

    Note: plt.show() blocks until the figure window is closed.
    """
    if net is None:
        net = model  # module global created in __main__

    # 计算重构误差   模型输出值与原始输入之间的均方误差
    def get_recon_err(X):
        # Mean squared reconstruction error per sample.
        return torch.mean((net(X) - X) ** 2, dim=1).detach().numpy()

    recon_err_train = get_recon_err(X_train)
    recon_err_test = get_recon_err(X_test)

    recon_err = np.concatenate([recon_err_train, recon_err_test])
    labels = np.concatenate([np.zeros(len(recon_err_train)), y_test])

    font1 = {'family': 'Times New Roman',
        'size': 24,
        }
    fig = plt.figure(figsize=(9, 6))
    # NOTE(review): seaborn's `shade` kwarg is deprecated (renamed `fill`);
    # kept as-is so behavior on the installed seaborn version is unchanged.
    sns.kdeplot(recon_err[labels == 0], shade=True, label='Normal')
    sns.kdeplot(recon_err[labels == 1], shade=True, label='Anormaly')
    plt.tick_params(labelsize=20)  # 设置坐标轴的字体大小
    plt.legend(loc=0, prop=font1)  # 图例在左上方，字体设置
    plt.show()

    from sklearn.metrics import roc_auc_score
    auc_score = roc_auc_score(y_test, recon_err_test)
    print('auc', auc_score)

    return recon_err_test


def mm(y_pred, y_true, X_nom_test, y_M):
    """Rank-threshold anomaly scores and print precision / recall / F1.

    The threshold is implicit: the ``X_nom_test.shape[0]`` lowest-scoring
    samples are declared normal (0) and all remaining samples anomalous (1).

    Args:
        y_pred: 1-D array of anomaly scores (reconstruction errors).
        y_true: 1-D binary ground-truth labels (0 normal, 1 anomaly).
        X_nom_test: normal test samples; only its row count is used here.
        y_M: multi-class labels; currently unused, kept for interface
            compatibility with existing callers.

    Returns:
        (prec, recall, f1) — the printed metrics (previously not returned).
    """
    # 确定阈值
    print('y_pred shape', y_pred.shape)
    decision = np.zeros((y_pred.shape[0]))
    index = np.argsort(y_pred)  # indices sorted by ascending score

    n_nom = X_nom_test.shape[0]
    decision[index[0:n_nom]] = 0  # lowest scores -> normal (already 0; explicit)
    decision[index[n_nom:]] = 1  # highest scores -> anomaly

    print('decision', Counter(decision))

    # Confusion-matrix entries computed directly with numpy. This replaces the
    # sklearn.metrics.confusion_matrix(...).ravel() call and yields the same
    # (tn, fp, fn, tp) values for binary labels.
    y_true = np.asarray(y_true)
    tp = int(np.sum((y_true == 1) & (decision == 1)))
    tn = int(np.sum((y_true == 0) & (decision == 0)))
    fp = int(np.sum((y_true == 0) & (decision == 1)))
    fn = int(np.sum((y_true == 1) & (decision == 0)))
    print('tn,fp,fn,tp', tn, fp, fn, tp)

    # Epsilon guards against division by zero when a class is empty.
    prec = tp / np.maximum(tp + fp, sys.float_info.epsilon)
    recall = tp / np.maximum(tp + fn, sys.float_info.epsilon)
    f1 = 2.0 * prec * recall / np.maximum(prec + recall, sys.float_info.epsilon)
    print('prec:', prec)
    print('recall:', recall)
    print('f1:', f1)
    return prec, recall, f1


if __name__ == '__main__':
    # Load and split the data. NOTE: train_set and train_loader become module
    # globals here — creatAE() reads them implicitly, so this call must run first.
    x_train, x_test, y_test,train_set,train_loader,X_nom_test,y_M= readData()

    input_size = x_train.shape[1]
    # Train the autoencoder; `model` becomes a module global that detec() reads.
    model = creatAE(input_size)
    # Reconstruction errors on the test set serve as anomaly scores.
    y_pred = detec(x_train,x_test,y_test)
    # Rank-threshold the scores and report precision/recall/F1.
    mm(y_pred,y_test,X_nom_test,y_M)