# import pandas as pd
# from util.dataDealing import preprocessor
# import models
#
# # Dataset used for training
# clean_csv_data = 'dataset/CleanedData/clean_data.csv'
# original_data, original_X, original_Y, combined_training_data, x_train, x_test, y_train, y_test = preprocessor(clean_csv_data)
# all_data = [original_data, original_X, original_Y, combined_training_data, x_train, x_test, y_train, y_test]
#
# rf_clf = models.random_forest(*all_data)
# svm_clf = models.svm(*all_data)
# dt_clf = models.decision_tree(*all_data)
# nb_clf = models.nb(*all_data)
# ada_clf = models.ada(*all_data)
# cnn_clf = models.cnn(*all_data)
#
# from sklearn.metrics import *
# from sklearn.model_selection import *
#
# rf_val = []
# svm_val = []
# dt_val = []
# nb_val = []
# ada_val = []
# cnn_val = []
# model_name = []
#
#
# def get_info(model):  # nn,cnn,svm,clf
#     if model == cnn_clf:
#         x_val_matrix = x_val.values
#         x_val1 = x_val_matrix.reshape(x_val_matrix.shape[0], 1, len(x_val.columns), 1)
#         y_pred_on_val = model.predict(x_val1) > 0.5
#         x_test_matrix = x_test.values
#         x_test1 = x_test_matrix.reshape(x_test_matrix.shape[0], 1, len(x_test.columns), 1)
#         y_pred_on_test = model.predict(x_test1) > 0.5
#     else:
#         y_pred_on_val = model.predict(x_train)
#         y_pred_on_test = model.predict(x_test)
#     if (model == rf_clf):
#         temp_val = []
#         temp_val.append(accuracy_score(y_test, y_pred_on_test))
#         temp_val.append(f1_score(y_test, y_pred_on_test))
#         temp_val.append(precision_score(y_test, y_pred_on_test))
#         temp_val.append(recall_score(y_test, y_pred_on_test))
#         temp_val.append(roc_auc_score(y_test, y_pred_on_test))
#         rf_val.append(temp_val)
#     if model == svm_clf:
#         temp_val = []
#         temp_val.append(accuracy_score(y_test, y_pred_on_test))
#         temp_val.append(f1_score(y_test, y_pred_on_test))
#         temp_val.append(precision_score(y_test, y_pred_on_test))
#         temp_val.append(recall_score(y_test, y_pred_on_test))
#         temp_val.append(roc_auc_score(y_test, y_pred_on_test))
#         svm_val.append(temp_val)
#     if model == dt_clf:
#         temp_val = []
#         temp_val.append(accuracy_score(y_test, y_pred_on_test))
#         temp_val.append(f1_score(y_test, y_pred_on_test))
#         temp_val.append(precision_score(y_test, y_pred_on_test))
#         temp_val.append(recall_score(y_test, y_pred_on_test))
#         temp_val.append(roc_auc_score(y_test, y_pred_on_test))
#         dt_val.append(temp_val)
#     if (model == nb_clf):
#         temp_val = []
#         temp_val.append(accuracy_score(y_test, y_pred_on_test))
#         temp_val.append(f1_score(y_test, y_pred_on_test))
#         temp_val.append(precision_score(y_test, y_pred_on_test))
#         temp_val.append(recall_score(y_test, y_pred_on_test))
#         temp_val.append(roc_auc_score(y_test, y_pred_on_test))
#         nb_val.append(temp_val)
#     if (model == ada_clf):
#         temp_val = []
#         temp_val.append(accuracy_score(y_test, y_pred_on_test))
#         temp_val.append(f1_score(y_test, y_pred_on_test))
#         temp_val.append(precision_score(y_test, y_pred_on_test))
#         temp_val.append(recall_score(y_test, y_pred_on_test))
#         temp_val.append(roc_auc_score(y_test, y_pred_on_test))
#         ada_val.append(temp_val)
#     if (model == cnn_clf):
#         temp_val = []
#         temp_val.append(accuracy_score(y_test, y_pred_on_test))
#         temp_val.append(f1_score(y_test, y_pred_on_test))
#         temp_val.append(precision_score(y_test, y_pred_on_test))
#         temp_val.append(recall_score(y_test, y_pred_on_test))
#         temp_val.append(roc_auc_score(y_test, y_pred_on_test))
#         cnn_val.append(temp_val)
#     print('******', str(model), '******')
#     print('||Validation Set||')
#     print('Accuracy:', accuracy_score(y_train, y_pred_on_val))
#     print('Avg Precision:', average_precision_score(y_train, y_pred_on_val))
#     print('f1_score:', f1_score(y_train, y_pred_on_val))
#     print('Precision:', precision_score(y_train, y_pred_on_val))
#     print('Recall:', recall_score(y_train, y_pred_on_val))
#     print('ROC_AUC:', roc_auc_score(y_train, y_pred_on_val))
#     print("y: ", y_test)
#     print("pred: ", y_pred_on_test)
#     print('||Test Set||')
#     print('Accuracy:', balanced_accuracy_score(y_test, y_pred_on_test))
#     print('Avg Precision:', average_precision_score(y_test, y_pred_on_test))
#     print('f1_score:', f1_score(y_test, y_pred_on_test))
#     print('Precision:', precision_score(y_test, y_pred_on_test))
#     print('Recall:', recall_score(y_test, y_pred_on_test))
#     print('ROC_AUC:', roc_auc_score(y_test, y_pred_on_test))
#     y_pred_on_val_df = pd.DataFrame(y_pred_on_val, columns=['defects1'])
#     y_pred_on_test_df = pd.DataFrame(y_pred_on_test, columns=['defects1'])
#     val_result = pd.concat([y_train['label'].reset_index(drop=True), y_pred_on_val_df['defects1']], axis=1)
#     val_result = val_result.rename(columns={'label': 'val_actual', 'defects1': 'val_predict'})
#     test_result = pd.concat([y_test['label'].reset_index(drop=True), y_pred_on_test_df['defects1']], axis=1)
#     test_result = test_result.rename(columns={'label': 'test_actual', 'defects1': 'test_predict'})
#     return val_result, test_result
#
#
# rf_val_result, rf_test_result = get_info(rf_clf)
# svm_val_result, svm_test_result = get_info(svm_clf)
# dt_val_result, dt_test_result = get_info(dt_clf)
# nb_val_result, nb_test_result = get_info(nb_clf)
# ada_val_result, ada_test_result = get_info(ada_clf)
# cnn_val_result, cnn_test_result = get_info(cnn_clf)
#
# # Concatenate the per-model predictions
# new_val_set_x = pd.concat([svm_val_result['val_predict'], rf_val_result['val_predict'], nb_val_result['val_predict']],
#                           axis=1)
# new_val_set_x_matrix = new_val_set_x.values
# new_val_set_y_matrix = svm_val_result['val_actual'].values
#
# new_test_set_x = pd.concat(
#     [svm_test_result['test_predict'], rf_test_result['test_predict'], nb_test_result['test_predict']], axis=1)
# new_test_set_x_matrix = new_test_set_x.values
# new_test_set_y_matrix = svm_test_result['test_actual'].values
#
#
# def send_classifiers_to_LR_file():
#     return svm_clf, rf_clf, nb_clf, cnn_clf, dt_clf, ada_clf
#
#
# from sklearn.linear_model import LogisticRegression
# from mlxtend.classifier import StackingClassifier
#
#
# def send_results_to_logistic_regression():
#     clf = LogisticRegression(random_state=0)
#     clf = StackingClassifier(classifiers=[rf_clf, svm_clf, nb_clf, ada_clf, dt_clf],
#                              # use_probas=True: use class-probability values as the meta-classifier's input
#                              # average_probas=False: whether to average the probability values produced for each class
#                              meta_classifier=LogisticRegression())
#     clf.fit(new_val_set_x_matrix, new_val_set_y_matrix)
#     return clf, new_test_set_x_matrix
#
# from pyecharts.charts import Radar
# schema=[
#     {"name":"准确率","max":1,"min":0},
#     {"name":"F1值","max":1,"min":0},
#     {"name":"精确率","max":1,"min":0},
#     {"name":"召回率","max":1,"min":0},
#     {"name":"AUC","max":1,"min":0}
# ]
# print(svm_val)
# radar=Radar()
# radar.add_schema(schema=schema,shape="polygon")
# radar.add("SVM",svm_val,color="red")
# radar.add("ADA",ada_val,color="blue")
# radar.add("CNN",cnn_val,color="yellow")
# radar.add("NB",nb_val,color="green")
# radar.add("RF",rf_val,color="pink")
# radar.render("Radar.html")