import joblib
from imblearn.over_sampling import RandomOverSampler
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn import tree
import numpy as np
from sklearn.model_selection import KFold, cross_val_score
from sklearn.ensemble import GradientBoostingClassifier
from utils import draw
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from collections import Counter
from scipy.io import arff
from models import random_forest
from models import decision_tree
from models import svm
from utils import draw


# Read an ARFF dataset and split it into features and binary labels.
def data_handle_arff(filename):
    """Load an ARFF file and return (features, labels).

    Parameters
    ----------
    filename : str or file-like
        Path to the ARFF file. The last column is treated as the class
        label; every other column is a feature.

    Returns
    -------
    tuple[numpy.ndarray, list[int]]
        Feature matrix and binary labels, where 0 means the software
        module is clean and 1 means it is defective.
    """
    data, _meta = arff.loadarff(filename)
    data = pd.DataFrame(data)
    # Features: every column except the last; labels: the last column.
    datasets = np.array(data.iloc[:, :-1])
    raw_labels = np.array(data.iloc[:, -1])
    # ARFF nominal values are loaded as bytes, so compare against b'clean'.
    # Map b'clean' -> 0 (clean), anything else -> 1 (defective).
    category_labels = [0 if label == b'clean' else 1 for label in raw_labels]

    return datasets, category_labels

# Read a CSV dataset and split it into features and binary labels.
def data_handle(filename):
    """Load a CSV file and return (features, labels).

    Parameters
    ----------
    filename : str or file-like
        Path to the CSV file. The last column holds the class label;
        every other column is a feature.

    Returns
    -------
    tuple[list[list], list[int]]
        Per-row feature lists and binary labels, where 0 means the
        software module is clean and 1 means it is defective.
    """
    read_data = pd.read_csv(filename)
    # Vectorized column split instead of the per-cell iloc double loop:
    # scalar .iloc lookups are O(n*m) Python-level calls and very slow.
    list_datasets = read_data.iloc[:, :-1].values.tolist()
    # CSVs exported from ARFF keep the bytes repr of the nominal label,
    # so the clean class is literally the string "b'clean'".
    label_column = read_data.iloc[:, -1]
    category_labels = [0 if label == "b'clean'" else 1 for label in label_column]

    return list_datasets, category_labels

def cal_decison_score():
    """Train decision trees of depth 1..10 on the EQ dataset, collect the
    AUC/F1 of each run, draw the AUC line chart, and print the mean AUC.
    """
    features, targets = data_handle('../feature/EQ.csv')  # preprocessed dataset

    # Scores for the different tree depths (companion series for the plot).
    score_list = decision_tree.cal_depth_score(features, targets)

    auc_values = []
    f1_values = []
    # Train one tree per depth and record its metrics.
    for depth in range(1, 11):
        result = decision_tree.decision_tree_train(features, targets, max_depth=depth)
        auc_values.append(result[0])
        f1_values.append(result[1])
        print("第" + str(depth) + "次训练结果")

    # Line chart of AUC against the per-depth scores.
    draw.draw_auc_line(auc_values, score_list)

    avg_auc = sum(auc_values) / 10
    print("平均准确率" + str(avg_auc))

# Run the experiment only when executed as a script, not on import, so
# other modules can import data_handle()/data_handle_arff() from this
# file without triggering a full training run as a side effect.
if __name__ == "__main__":
    cal_decison_score()