'''
Implementations of different machine-learning models.
'''
import numpy as np
import pandas as pd

import xgboost as xgb

import keras
from keras.models import Sequential
from keras.layers import Input,Reshape,Conv1D,MaxPooling1D,Flatten,Dense, Dropout,Activation 
from keras.optimizers import SGD

from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn import model_selection
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix, accuracy_score

from sklearn.svm import SVC
from imblearn.under_sampling import RandomUnderSampler,ClusterCentroids
from imblearn.over_sampling import SMOTE, ADASYN


def NN_model(data):     #-------------------ANN as the accuracy-scoring model-------------------#
    '''Estimate mean accuracy and its standard deviation with a feed-forward ANN.

    data: 2-D numpy array whose last column is the binary label (0/1) and
    whose remaining columns are features.
    Returns (mean_accuracy, std_accuracy) in percent over 20 repetitions of
    stratified 5-fold CV, after trimming the single lowest and highest fold
    scores.
    '''
    n_features = data.shape[1] - 1          # index of the label column
    features = data[:, range(n_features)]
    labels = data[:, n_features]

    # Shuffle features and labels with the same seed so rows stay aligned.
    np.random.seed(200)
    np.random.shuffle(features)
    np.random.seed(200)
    np.random.shuffle(labels)

    fold_scores = []                        # per-fold test accuracy (percent)

    for seed in range(20):
        splitter = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
        for train_idx, test_idx in splitter.split(features, labels):
            # Rebalance the training fold only, via SMOTE oversampling.
            resampler = SMOTE()
            x_res, y_res = resampler.fit_resample(features[train_idx], labels[train_idx])

            # One-hot encode the binary labels for the softmax output layer.
            y_res_cat = keras.utils.to_categorical(y_res, num_classes=2)
            y_test_cat = keras.utils.to_categorical(labels[test_idx], num_classes=2)

            # Build a DNN with four 32-unit ReLU hidden layers, dropout 0.2 each.
            net = Sequential()
            net.add(Dense(32, activation='relu', input_dim=n_features))
            net.add(Dropout(0.2))
            for _ in range(3):
                net.add(Dense(32, activation='relu'))
                net.add(Dropout(0.2))
            net.add(Dense(2, activation='softmax'))

            optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
            net.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])

            # NOTE(review): validation_data is the test fold — used here only
            # for monitoring during fit, not for early stopping.
            net.fit(x_res, y_res_cat,
                    epochs=200,  # number of passes over the training data
                    batch_size=128,
                    validation_data=(features[test_idx], y_test_cat))

            # Record test-set accuracy as a percentage.
            fold_scores.append(net.evaluate(features[test_idx],
                y_test_cat, verbose=0, batch_size=128)[1]*100)
            # Print the test-fold confusion matrix.
            predicted = net.predict_classes(features[test_idx])
            print(confusion_matrix(labels[test_idx], predicted))

    # Trim the single lowest and highest scores before aggregating.
    fold_scores.sort()                      # ascending order
    print('len test score:', len(fold_scores))###
    min_max = 1                             # extremes removed from each end
    print('test score:', fold_scores[min_max:-min_max])###

    # Mean and standard deviation of the trimmed scores.
    mean_acc = np.mean(fold_scores[min_max:-min_max])
    mean_square = np.std(fold_scores[min_max:-min_max])

    return mean_acc, mean_square


def xgb_model(data):   #-------------------使用XGBoost作为计算准确率的模型-------------------#
    '''Estimate accuracy statistics and feature importances with XGBoost.

    data: 2-D numpy array whose last column is the binary label (0/1) and
    whose remaining columns are features.
    Returns (mean_accuracy, std_accuracy, mean_feature_importances) over
    10 repetitions of stratified 5-fold CV; importances are averaged over
    the top-75%-scoring folds only.
    '''
    # Split the array into features and labels.
    column_counts = data.shape[1]-1   # index of the label column
    x = data[:,range(column_counts)]    # features
    y = data[:,column_counts]   # labels

    # Shuffle features and labels with the same seed so rows stay aligned.
    np.random.seed(200)
    np.random.shuffle(x)
    np.random.seed(200)
    np.random.shuffle(y)

    test_score = []   # per-fold test accuracy (fraction in [0, 1])
    index_importance = []  # per-fold feature importances

    for i in range(10):
        kfold = StratifiedKFold(n_splits=5,shuffle=True,random_state=i)
        # BUG FIX: original called kfold.split(x_scaled, y), but x_scaled is
        # never defined in this function — NameError at runtime. Use x.
        for train,test in kfold.split(x,y):
            # Rebalance the training fold only, via SMOTE oversampling.
            smote = SMOTE()
            x_train_resampled,y_train_resampled = smote.fit_resample(x[train],y[train])
            x_test = x[test]
            y_test = y[test]
            # Build and fit the XGBoost classifier on the resampled fold.
            tabModel = xgb.XGBClassifier(max_depth=6,
                            learning_rate=0.1,
                            n_estimators=100,
                            silent=True,
                            objective='binary:logistic',
                            scale_pos_weight=1)
            tabModel.fit(x_train_resampled,y_train_resampled)

            # Print the test-fold confusion matrix.
            y_test_predict = tabModel.predict(x_test)
            print(confusion_matrix(y_test,y_test_predict))
            # Test-fold accuracy.
            test_accuracy = accuracy_score(y_test,y_test_predict)
            print("accuarcy: %.2f%%" % (test_accuracy*100.0))

            test_score.append(test_accuracy)
            index_importance.append(tabModel.feature_importances_)

    # Sort scores descending and keep importances paired with their scores
    # (replaces the original O(n^2) paired bubble sort).
    count = len(test_score)
    order = sorted(range(count), key=lambda k: test_score[k], reverse=True)
    test_score = [test_score[k] for k in order]
    index_importance = [index_importance[k] for k in order]

    # Average importances over the top-75%-scoring folds only.
    count_use = int(count*0.75)+1
    index_importance_use = index_importance[:count_use]
    test_score_use = test_score   # statistics still use ALL fold scores

    index_importance_mean = np.mean(np.array(index_importance_use),axis=0)
    print('index importance:',index_importance_mean.tolist())

    # Mean and standard deviation of the fold accuracies.
    print('test score:',test_score_use)###
    mean_acc = np.mean(test_score_use)
    mean_square = np.std(test_score_use)

    return mean_acc,mean_square,index_importance_mean.tolist()