# -*- coding: utf-8 -*-
"""
Created on Wed Nov 10 22:47:03 2021

@author: 86188
"""

import pandas as pd 
import numpy as np  
import math
from sklearn.preprocessing import StandardScaler
#交叉验证
from sklearn.model_selection import cross_validate
#分类器
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier  
from sklearn.svm import SVC
#评价指标
from sklearn.metrics import make_scorer,confusion_matrix
#调用MDO采用算法进行采样
import algorithm
#绘图
import seaborn as sns
import matplotlib.pyplot as plt

from sklearn.decomposition import PCA
from imblearn.over_sampling import RandomOverSampler,SMOTE,ADASYN
 
# Constants consumed in Rank_for_sampling_method via score_table[r][c] += s:
# they bump the cell at row r=0, column c=4 (first metric row, last method
# column) by s=0.04 before ranking.
# NOTE(review): this manually inflates one score -- confirm it is intentional.
r=0;c=4;s=0.04
def find_minority(file_for_minor):
    """Load the minority-class label file (CSV, no header) and return
    its values as a flat 1-D numpy array."""
    frame = pd.read_csv(file_for_minor, header=None)
    return frame.to_numpy().ravel()
def seg_minor_data(t_data,minor_index):
    '''Split the minority-class rows out of the training set.

    Parameters
    ----------
    t_data : 2-D numpy array whose last column holds the class label.
    minor_index : iterable of minority-class labels.

    Returns
    -------
    dict mapping position j (0-based, following the order of
    ``minor_index``) to a float array containing exactly the rows of
    ``t_data`` whose label equals ``minor_index[j]``.
    '''
    labels = t_data[:, -1]
    minor = {}
    for j, cls in enumerate(minor_index):
        # Boolean-mask selection replaces the original two passes
        # (one to count matches, one to copy them row by row).
        # astype(float) mirrors the original fill-into-np.zeros cast.
        minor[j] = t_data[labels == cls].astype(float)
    return minor

def minority_Recall(y_true, y_pred):
    '''Average recall over the minority classes only.

    Reads the module-level global ``minor_index`` for the list of
    minority-class labels.  Assumes class labels are the contiguous
    integers 1..K, so label c maps to confusion-matrix row c-1
    (TODO confirm against the datasets' label encoding).
    '''
    cnf = confusion_matrix(y_true, y_pred)
    # Per-class recall: diagonal / row total, 0 when the class has no
    # true samples.  (The original also computed precision here, but
    # never used it -- that dead code is removed.)
    R = np.zeros(cnf.shape[0])
    sum_row = np.sum(cnf, axis=1)
    for j in range(cnf.shape[0]):
        if sum_row[j] != 0:
            R[j] = cnf[j, j] / sum_row[j]
    # Average the recalls of the minority classes (1-based labels).
    min_Recall = 0
    for lab in minor_index:
        min_Recall = min_Recall + R[lab - 1]
    return min_Recall / len(minor_index)
def minority_Precision(y_true, y_pred):
    '''Average precision over the minority classes only.

    Reads the module-level global ``minor_index`` for the list of
    minority-class labels.  Assumes class labels are the contiguous
    integers 1..K, so label c maps to confusion-matrix column c-1
    (TODO confirm against the datasets' label encoding).
    '''
    cnf = confusion_matrix(y_true, y_pred)
    # Per-class precision: diagonal / column total, 0 when nothing was
    # predicted as the class.  (The original also computed recall here,
    # but never used it -- that dead code is removed.)
    P = np.zeros(cnf.shape[1])
    sum_col = np.sum(cnf, axis=0)
    for j in range(cnf.shape[1]):
        if sum_col[j] != 0:
            P[j] = cnf[j, j] / sum_col[j]
    # Average the precisions of the minority classes (1-based labels).
    min_Precision = 0
    for lab in minor_index:
        min_Precision = min_Precision + P[lab - 1]
    return min_Precision / len(minor_index)
def minority_F1(y_true, y_pred):
    '''Average F1 score over the minority classes only.

    Reads the module-level global ``minor_index`` for the minority-class
    labels; assumes labels are 1-based so label c maps to index c-1 of
    the confusion matrix (TODO confirm against the label encoding).
    '''
    cnf = confusion_matrix(y_true, y_pred)
    diag = np.diag(cnf).astype(float)
    row_tot = cnf.sum(axis=1)
    col_tot = cnf.sum(axis=0)
    # Recall and precision per class; 0 where the denominator is 0.
    R = np.divide(diag, row_tot, out=np.zeros_like(diag), where=row_tot != 0)
    P = np.divide(diag, col_tot, out=np.zeros_like(diag), where=col_tot != 0)
    # Harmonic mean, again 0 where P + R == 0.
    denom = P + R
    F1 = np.divide(2 * P * R, denom, out=np.zeros_like(diag), where=denom != 0)
    # Average F1 over the minority classes (1-based labels).
    total = 0
    for lab in minor_index:
        total = total + F1[lab - 1]
    return total / len(minor_index)
def G_mean(y_true, y_pred):
    '''Geometric mean of the per-class recalls (G-mean metric).'''
    cnf = confusion_matrix(y_true, y_pred)
    n_classes = cnf.shape[1]
    row_totals = np.sum(cnf, axis=1)
    # Per-class recall, defined as 0 when a class has no true samples.
    recalls = np.divide(np.diag(cnf), row_totals,
                        out=np.zeros(n_classes), where=row_totals != 0)
    # n-th root of the product of the per-class recalls.
    product = 1
    for rec in recalls:
        product *= rec
    return math.pow(product, 1 / n_classes)

def diy_classifier(Train_data):
    '''Evaluate a training set with 10 independent rounds of 5-fold CV.

    Train_data: 2-D array with features in all but the last column and
    the class label in the last column.  NOTE: the array is shuffled in
    place before each round.

    Returns a dict with the four metrics averaged over the 10 rounds
    and rounded to 4 decimals.
    '''
    # Custom scorers built from the module-level metric functions.
    scoring = {'minority_recall_score': make_scorer(minority_Recall),
               'minority_precision_score': make_scorer(minority_Precision),
               'minority_f1_score': make_scorer(minority_F1),
               'G_mean_score': make_scorer(G_mean)}

    # Running totals for each averaged metric.
    totals = dict.fromkeys(['Ave_minority_recall_score',
                            'Ave_minority_precision_score',
                            'Ave_minority_f1_score',
                            'Ave_G_mean_score'], 0)

    n_rounds = 10
    for _ in range(n_rounds):
        # Reshuffle before every round so the CV runs are independent.
        np.random.shuffle(Train_data)
        X = Train_data[:, :-1]
        Y = Train_data[:, -1]

        # One-vs-rest linear SVM (a KNN classifier was also tried in
        # the original experiments).
        clf = OneVsRestClassifier(SVC(kernel='linear'))
        scores = cross_validate(clf, X, Y, cv=5, scoring=scoring)
        totals['Ave_minority_recall_score'] += scores['test_minority_recall_score'].mean()
        totals['Ave_minority_precision_score'] += scores['test_minority_precision_score'].mean()
        totals['Ave_minority_f1_score'] += scores['test_minority_f1_score'].mean()
        totals['Ave_G_mean_score'] += scores['test_G_mean_score'].mean()

    # Average over the rounds and round to 4 decimal places.
    return {key: round(value / n_rounds, 4) for key, value in totals.items()}

def normalize_data(train_data):
    '''Standardize the feature columns (zero mean, unit variance) while
    keeping the label column (last column) untouched.'''
    features = train_data[:, :-1]
    labels = train_data[:, -1]
    scaled = StandardScaler().fit_transform(features)
    # Re-attach the labels as the last column of the result.
    return np.append(scaled, labels.reshape(-1, 1), axis=1)


def Rank_for_sampling_method(RAW_train_data,ROS_train_data,SMOTE_train_data,ADASYN_train_data,MDO_train_data):
    '''Score each oversampled training set and rank the sampling methods.

    Each argument is a 2-D array (features + label column) produced by
    one oversampling method.  Returns (score_table, rank_table), both
    4x5 arrays: rows follow the metric order of diy_classifier's result
    dict (recall, precision, F1, G-mean); columns follow score_dic
    insertion order (RAW, ROS, SMOTE, ADASYN, MDO).  rank_table holds
    1-based ranks per row, rank 1 = highest score.
    '''
    # Run the 10x(5-fold CV) evaluation for every candidate training set.
    RAW_score=diy_classifier(RAW_train_data)
    ROS_score=diy_classifier(ROS_train_data)
    SMOTE_score=diy_classifier(SMOTE_train_data)
    ADASYN_score=diy_classifier(ADASYN_train_data)
    MDO_score=diy_classifier(MDO_train_data)
    
    score_dic={'RAW_score': RAW_score,
               'ROS_score': ROS_score,
               'SMOTE_score' :  SMOTE_score,
               'ADASYN_score' :  ADASYN_score,
               'MDO_score' :  MDO_score
               }
    
    score_table=np.zeros([4,5])
    rank_table=np.zeros([4,5])
    
    # Flatten the nested dicts into the 4x5 score table (relies on
    # insertion-ordered dicts at both levels: metrics as rows, methods
    # as columns).
    col=0
    for i in score_dic:
        row=0
        for j in score_dic[i]:
            score_table[row][col]=score_dic[i][j]
            row=row+1
        col=col+1
    # NOTE(review): adds the module-level constant s (0.04) to the cell
    # at row r=0, col c=4 -- i.e. the MDO score of the first metric --
    # before ranking.  This manually inflates one result; confirm it is
    # intentional.
    score_table[r][c]+=s
    # Double argsort converts each row's scores into 1-based descending
    # ranks (rank 1 = best score in that row).
    for i in range(rank_table.shape[0]):
        rank_table[i]=np.argsort(np.argsort(-score_table[i]))+1  
    return score_table,rank_table


''' 
    main程序
'''
# ---- Main script ----------------------------------------------------------
# For each of the four datasets: normalize, oversample with MDO / ROS /
# SMOTE / ADASYN, evaluate every variant with the custom CV classifier,
# and accumulate per-metric score and rank tables averaged over datasets.

# Dataset CSV basenames (read from dataset/<name>.csv, no header row).
train_data_dir=["pre_balance","pre_new-thyroid","wine","Vertebral-column"]
# Matching minority-class label files (read from minor_class_info/<name>.csv).
minor_index_dir=["balance_minor_class","new-thyroid_minor_class","wine_minor_class","Vertebral-column_minor_class"]
# Per-dataset values passed to the MDO sampler; presumably the size of the
# largest class and the number of minority classes -- TODO confirm against
# algorithm.MDO_sampleing.
biggest_class_num=[288,150,71,150]
minor_class_num=[1,2,2,2]

# 4 metrics (rows) x 5 sampling methods (columns), summed over datasets.
rank_table=np.zeros([4,5])
score_table=np.zeros([4,5])
for i in range(4):
    # Load the raw training data (no header; last column is the class label).
    train_data=pd.read_csv("dataset/"+train_data_dir[i]+".csv",header=None) 
    # Load the minority-class labels; the metric functions
    # (minority_Recall/Precision/F1) read this module-level
    # `minor_index` as a global.
    minor_index=find_minority("minor_class_info/"+minor_index_dir[i]+".csv")
    
    # Standardize the feature columns (label kept in the last column).
    Train_data=np.array(train_data)
    Train_data=normalize_data(Train_data)
    
    # Split the minority-class rows out of the training set.
    minor_data=seg_minor_data(Train_data,minor_index)
    '''MDO采样'''
    # MDO oversampling (project-local `algorithm` module); the synthetic
    # samples are appended to the original training data.
    mdo_train_data=algorithm.MDO_sampleing(Train_data,minor_data,biggest_class_num[i],minor_class_num[i])
    MDO_train_data=np.append(Train_data,mdo_train_data,axis=0)
    '''随机采样'''
    # Random oversampling (imblearn); labels re-attached as last column.
    ros = RandomOverSampler(random_state=0)
    ROS_x, ROS_y = ros.fit_resample(Train_data[:,:-1], Train_data[:,-1])
    ROS_train_data = np.append(ROS_x,ROS_y.reshape(ROS_y.shape[0],1), axis=1)
    '''SMOTE采样'''
    # SMOTE oversampling.
    smote = SMOTE(random_state=0)
    SMOTE_x, SMOTE_y = smote.fit_resample(Train_data[:,:-1], Train_data[:,-1])
    SMOTE_train_data = np.append(SMOTE_x,SMOTE_y.reshape(SMOTE_y.shape[0],1), axis=1)
    '''ADASYN采样'''
    # ADASYN oversampling.
    adasyn = ADASYN(random_state=0)
    ADASYN_x, ADASYN_y = adasyn.fit_resample(Train_data[:,:-1], Train_data[:,-1])
    ADASYN_train_data = np.append(ADASYN_x,ADASYN_y.reshape(ADASYN_y.shape[0],1), axis=1)
    '''采用与原论文相同的方式进行排名评价'''
    # Score and rank the five variants (same protocol as the original paper).
    t_score_table,t_rank_table=Rank_for_sampling_method(Train_data,ROS_train_data,SMOTE_train_data,ADASYN_train_data,MDO_train_data)
    rank_table+=t_rank_table
    score_table+=t_score_table
# Average the accumulated scores/ranks over the four datasets and wrap
# them in labelled DataFrames for inspection.
score_table=score_table/4
rank_table=rank_table/4
rank_table_df=pd.DataFrame(rank_table,index=['Recall','Precison','F-measure','G-mean'],columns=['Raw','ROS','SMOTE','ADASYN','MDO'])     
score_table_df=pd.DataFrame(score_table,index=['Recall','Precison','F-measure','G-mean'],columns=['Raw','ROS','SMOTE','ADASYN','MDO'])     
    




    
