
from dataLoad import load_data, train_data_process, test_data_process
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score
# from pyod.models.xgbod import XGBOD
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.svm import SVC

# from baggingPU import BaggingClassifierPU

import json
import os
import numpy as np

import warnings
warnings.filterwarnings("ignore")

def run(data_train, data_test, clf_name):
    """Train the classifier named ``clf_name`` and evaluate it on the test split.

    Parameters
    ----------
    data_train : raw training data, consumed by ``train_data_process``
    data_test : raw test data, consumed by ``test_data_process``
    clf_name : str
        Key into the ``classifiers`` table below (e.g. 'RandomForest').

    Returns
    -------
    dict
        Evaluation metrics (precision, recall, pf, F-measure, accuracy, AUC)
        plus the training-sample count, or
    str
        The error message if fitting/prediction raises ``ValueError``
        (e.g. the training split contains only one class).
    """
    # Split each raw dataset into features (X) and defect labels (y).
    X_train, y_train = train_data_process(data_train)
    X_test, y_true = test_data_process(data_test)

    # Registry of candidate models; all seeded where the estimator is stochastic
    # so repeated runs are reproducible.
    classifiers = {
        # k-nearest neighbours (k = 3)
        "KNeighbors": KNeighborsClassifier(3),
        # support vector classifier
        "SVC": SVC(random_state=0),
        # Gaussian-process classifier with an RBF kernel
        "GaussianProcess": GaussianProcessClassifier(1.0 * RBF(1.0)),
        # decision tree
        "DecisionTree": DecisionTreeClassifier(random_state=0),
        # random forest
        "RandomForest": RandomForestClassifier(random_state=0),
        # multi-layer perceptron
        "MLP": MLPClassifier(random_state=0),
        # AdaBoost meta-estimator
        "AdaBoost": AdaBoostClassifier(),
        # Gaussian naive Bayes
        "GaussianNB": GaussianNB(),
        # quadratic discriminant analysis
        "QuadraticDiscriminantAnalysis": QuadraticDiscriminantAnalysis()
    }
    clf = classifiers[clf_name]

    # Keep the try body minimal: fit/predict are the realistic ValueError
    # sources (bad shapes, single-class training data). The original wrapped
    # the whole function, which could silently swallow metric errors too.
    try:
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
    except ValueError as e:
        return str(e)

    # Confusion-matrix counts, paired via zip instead of index lookups.
    TP = FN = FP = TN = 0
    for actual, predicted in zip(y_true, y_pred):
        if actual:
            if predicted:
                TP += 1
            else:
                FN += 1
        else:
            if predicted:
                FP += 1
            else:
                TN += 1

    # pf (probability of false alarm) = FP / (FP + TN); undefined when the
    # test split has no negative samples, in which case report that as text.
    if (FP + TN) == 0:
        pf = "no negative samples."
    else:
        pf = FP / (FP + TN)

    # AUC is undefined when y_true contains a single class; report the
    # sklearn error text instead of crashing the whole sweep.
    try:
        auc = roc_auc_score(y_true, y_pred)
    except ValueError as e:
        auc = str(e)

    return {
        'train samples': str(X_train.shape[0]),
        'precision': precision_score(y_true, y_pred),
        'recall': recall_score(y_true, y_pred),
        'pf': pf,
        'F-measure': f1_score(y_true, y_pred),
        'accuracy': accuracy_score(y_true, y_pred),
        'AUC': auc
    }

if __name__ == '__main__':

    # Every classifier registered in run(); the original list contained
    # 'RandomForest' twice, which re-ran the full sweep for it and
    # overwrote its own identical output files.
    clf_nameList = ['RandomForest', 'KNeighbors', 'SVC', 'GaussianProcess', 'DecisionTree',
                    'MLP', 'AdaBoost', 'GaussianNB', 'QuadraticDiscriminantAnalysis']

    # Dataset families to sweep ('MORPH' intentionally excluded).
    dirList = ['NASA', 'AEEEM', 'Relink', 'SOFTLAB']

    for clf_name in clf_nameList:
        # 'dataset_dir' instead of 'dir' — avoids shadowing the builtin.
        for dataset_dir in dirList:
            data_root = 'D:/code/test/dataSetMat/' + dataset_dir
            for dataset in os.listdir(data_root):
                # Strip the file extension to get the dataset's base name.
                data_name_train = dataset.split('.')[0]
                filepath_train = data_root + '/' + data_name_train + '.mat'
                # load_data yields the train/test split for this .mat file.
                data1_train, data1_test = load_data(filepath_train, data_name_train)
                result = {
                    'method': clf_name,
                    'train samples': '',
                    'dataset': dataset_dir,
                    'subDataSet': dataset,
                    'result': [run(data1_train, data1_test, clf_name)]
                }
                print(result)
                # One JSON file per (classifier, family, dataset) triple.
                out_dir = "./outcome/" + clf_name + '/' + dataset_dir + "/"
                # exist_ok avoids the check-then-create race of the original
                # os.path.exists + os.makedirs pair.
                os.makedirs(out_dir, exist_ok=True)
                with open(out_dir + data_name_train + ".json", "w") as f:
                    json.dump(result, f, indent=4)
    print("over!")
