import joblib
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from imblearn.over_sampling import SMOTE
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from collections import Counter
from scipy.io import arff
from models import random_forest
from models import decision_tree
from models import svm
from utils import draw

#读取arff数据集
def data_handle_arff(filename):
    """Load an ARFF dataset and split it into features and binary labels.

    Args:
        filename: path to the .arff file.

    Returns:
        (datasets, category_labels): feature matrix as a numpy array, and a
        list of ints where 0 means the sample is clean and 1 means defective.
    """
    data, _meta = arff.loadarff(filename)
    frame = pd.DataFrame(data)
    # Every column except the last holds features; the last holds the label.
    datasets = np.array(frame.iloc[:, :-1])
    # scipy decodes nominal attributes as bytes, hence the b'clean' comparison.
    category_labels = [0 if label == b'clean' else 1
                       for label in np.array(frame.iloc[:, -1])]
    return datasets, category_labels

#读取csv数据集
def data_handle(filename):
    """Load a preprocessed CSV dataset and split it into features and labels.

    Args:
        filename: path to the .csv file; the last column is the class label.

    Returns:
        (list_datasets, category_labels): feature rows as a list of lists,
        and a list of ints where 0 means clean and 1 means defective.
    """
    read_data = pd.read_csv(filename)
    # Slice the whole frame at once instead of reading cell-by-cell with
    # iloc[i, j] — the original nested loop was O(rows * cols) pandas calls.
    list_datasets = read_data.iloc[:, :-1].values.tolist()
    # Preprocessed CSVs store the bytes repr of the original label as text,
    # so a clean sample is literally the string "b'clean'".
    category_labels = [0 if label == "b'clean'" else 1
                       for label in read_data.iloc[:, -1]]
    return list_datasets, category_labels

#读入数据集后绘图
def draw_read_data(data):
    """Show a boxplot of the feature matrix (first element of *data*)."""
    frame = pd.DataFrame(data[0])
    # One box per attribute column.
    frame.boxplot()
    plt.title('Boxplot of ARFF Data')
    plt.xlabel('Attributes')
    plt.ylabel('Values')
    plt.show()

#绘制折线图
def draw_auc_line(auc_array, f1_array):
    """Plot AUC and F1 values for training rounds 1..10 as two line series."""
    rounds = [index + 1 for index in range(10)]
    plt.plot(rounds, auc_array, rounds, f1_array)
    plt.show()

#rd
def rd(datasets, labels):
    """Thin wrapper: train a random-forest model via models.random_forest."""
    result = random_forest.random_forest_train(datasets, labels)
    return result

#dt
def dt(datasets, labels):
    """Train a decision tree with a fixed max depth of 4.

    Returns the result of decision_tree.decision_tree_train, expected to be
    (auc, f1) like the other model wrappers in this file.
    """
    # Depth 4 was settled on after earlier experiments that swept depths
    # 1..10 and compared AUC/F1 per depth (that sweep code has been removed).
    return decision_tree.decision_tree_train(datasets, labels, 4)

#gbdt
def gbdt(datasets, labels):
    """Thin wrapper: train a GBDT model via models.decision_tree."""
    trained = decision_tree.gbdt_train(datasets, labels)
    return trained

#svm
def SVM(datasets, labels):
    """Thin wrapper: train an SVM model via models.svm."""
    outcome = svm.svm_train(datasets, labels)
    return outcome

#预测结果并输出
def predict(datasets, labels, model_num):
    """Load a persisted classifier, predict on a dataset and report metrics.

    Args:
        datasets: feature rows to predict on.
        labels: ground-truth labels (0 = clean, 1 = defective).
        model_num: '1'..'4' selecting which saved model/result file to use.

    Returns:
        Prediction accuracy as a percentage (float).
    """
    model_files = {
        '1': 'random.pkl',
        '2': 'decision.pkl',
        '3': 'gbdt.pkl',
        '4': 'svm.pkl'
    }
    # NOTE(review): entry '4' lacks the '_result' suffix the others use;
    # kept as-is because downstream tooling may already read 'svm.txt'.
    result_files = {
        '1': 'random_result.txt',
        '2': 'decision_result.txt',
        '3': 'gbdt_result.txt',
        '4': 'svm.txt'
    }

    X_test = datasets[:]
    clf = joblib.load("./files/" + model_files.get(model_num))
    y_predict = clf.predict(X_test)  # run the saved classifier on the test set
    np.savetxt('./files/' + result_files.get(model_num), y_predict)

    # Accuracy is reported as a percentage; the other metrics stay on 0-1.
    accuracy = np.mean(y_predict == labels) * 100
    macro_precision = metrics.precision_score(labels, y_predict, average='macro')
    micro_precision = metrics.precision_score(labels, y_predict, average='micro')
    macro_recall = metrics.recall_score(labels, y_predict, average='macro')
    weighted_f1 = metrics.f1_score(labels, y_predict, average='weighted')
    print('准确率:', accuracy)
    print('宏平均精确率:', macro_precision)
    print('微平均精确率:', micro_precision)
    print('宏平均召回率:', macro_recall)
    print('平均F1-score:', weighted_f1)
    print('混淆矩阵输出:\n', metrics.confusion_matrix(labels, y_predict))
    print('分类报告:', metrics.classification_report(labels, y_predict))
    # Draw the ROC curve and compute its AUC.
    draw.plot_roc(labels, y_predict, accuracy, macro_precision, macro_recall, weighted_f1)

    # Pie chart of predicted defective vs. clean counts.
    defective = sum(y_predict == 1)
    clean = sum(y_predict == 0)
    plt.rcParams['font.sans-serif'] = 'SimHei'  # enable CJK glyphs in the plot
    plt.figure(figsize=(6, 6))  # square canvas so the pie renders as a circle
    label = ['有缺陷数', '无缺陷数']
    explode = [0.01, 0.05]  # offset each wedge slightly from the center
    plt.pie([defective, clean], explode=explode, labels=label, autopct='%1.1f%%')
    plt.title('缺陷数目')
    plt.show()

    return accuracy


#开始
print('是否启用预处理后的数据集：（输入0不启用，输入其他启用）')
process_num = input('请输入：')

# '0' -> raw ARFF datasets; anything else -> preprocessed CSV feature files.
if process_num == '0':
    data_dict = {
        '1': 'EQ.arff',
        '2': 'JDT.arff',
        '3': 'LC.arff',
        '4': 'ML.arff',
        '5': 'PDE.arff'
    }
else:
    data_dict = {
        '1': 'EQ.csv',
        '2': 'JDT.csv',
        '3': 'LC.csv',
        '4': 'ML.csv',
        '5': 'PDE.csv'
    }

print('输入1~5选择用作训练的数据集：')
for key in data_dict:
    print(key + ':' + data_dict[key])

data_num = input('请输入：')
data_selected_train = data_dict.get(data_num)
print('选择了' + data_selected_train)

# Resolve the file path and load it with the matching reader.
if process_num == '0':
    data_selected_train = 'dataSet/AEEEM/' + data_selected_train
    datasets, labels = data_handle_arff(data_selected_train)
else:
    data_selected_train = 'feature/' + data_selected_train
    datasets, labels = data_handle(data_selected_train)

# Class-balance report. Guard against a dataset with no defective samples:
# the original unconditional division raised ZeroDivisionError in that case.
count = sum(1 for label in labels if label == 1)
if count:
    print("无缺陷是有缺陷的" + str((len(labels) - count) / count) + "倍")
else:
    print("数据集中没有有缺陷样本")

#选择训练模型
# Map the user's menu choice to the matching training wrapper.
model_dict = {
    '1': rd,
    '2': dt,
    '3': gbdt,
    '4': SVM
}
model_list = ['1:random_forest', '2:decision_tree', '3:GBDT', '4:SVM']
print('输入1~4选择模型：')
for entry in model_list:
    print(entry)
model_num = input('请输入：')
# Every wrapper returns (auc, f1) for its model.
train_auc, train_f1 = model_dict.get(model_num)(datasets, labels)

#预测
# The candidate prediction datasets are the same files as the training ones
# and depend on the same preprocessing choice, so reuse the mapping that was
# already built instead of duplicating the two dict literals.
predict_dict = dict(data_dict)
for key in predict_dict:
    print(key + ':' + predict_dict[key])

print('输入1~5选择要预测的数据集：')
predict_num = input('请输入：')
data_selected_predict = predict_dict.get(predict_num)
print('选择了' + data_selected_predict)

# Load the chosen dataset with the reader matching the preprocessing choice.
if process_num == '0':
    datasets_predict, labels_predict = data_handle_arff('dataSet/AEEEM/' + data_selected_predict)
else:
    datasets_predict, labels_predict = data_handle('feature/' + data_selected_predict)

predict_auc = predict(datasets_predict, labels_predict, model_num)


# # #保存一次的数据
#
#
# # 构造表示数据的字符串
# if process_num == '0':
#     status_str = '未处理数据'
# else:
#     status_str = '已处理数据'
# data_str = '选择训练集：{}'.format(data_dict.get(data_num))
# model_str = '选择模型：{}'.format(model_list[int(model_num)])
# train_acc_str = '训练准确率：{:.2f}'.format(train_auc)
# train_f1_str = '训练召回率：{:.2f}'.format(train_f1)
# predict_data_str = '预测数据集：{}'.format(predict_dict.get(predict_num))
# predict_auc_str = '预测正确百分比：{:.2f}%'.format(predict_auc)
#
# # 组合所有字符串为一个列表
# str_list = [status_str, data_str, model_str, train_acc_str, train_f1_str, predict_data_str, predict_auc_str,'\n']
#
# # 将所有字符串连接成一个输出字符串
# result_str = status_str + " " + data_str + " " + \
#              model_str + " " + train_acc_str + " " + train_f1_str + " " + predict_data_str + " " + predict_auc_str
#
# # 如果文件存在，追加内容
# with open('output.txt', 'a') as file:
#     file.write(result_str + '\n')


