
import pandas as pd
import base.datadealing as preprocess
import base.models as models

# Load the software-defect-prediction splits from the project preprocessor.
# NOTE(review): names suggest original_* are the raw frames and x/y_* are
# the train/test splits — confirm against base.datadealing.
original_data, original_X, original_Y,combined_training_data,x_train,x_test,y_train,y_test = preprocess.my_sdp_preprocessor()
all_data = [original_data, original_X, original_Y,combined_training_data,x_train,x_test,y_train,y_test]
#

# Train the base classifiers; the CNN and NN fits are disabled in this run.
# cnn_clf = models.cnn(*all_data)
svm_clf = models.svm(*all_data)
rf_clf = models.random_forest(*all_data)
# nn_clf = models.NN(*all_data)
nb_clf = models.nb(*all_data)

# NOTE(review): wildcard imports supply the sklearn metric names used
# throughout this script (accuracy_score, f1_score, ...); explicit imports
# would be preferable.
from sklearn.metrics import *
from sklearn.model_selection  import *

# Per-model metric accumulators filled by get_info():
# each entry is [accuracy, f1, precision, recall, roc_auc] on the test set.
svm_val=[]
rf_val=[]
nn_val=[]
cnn_val=[]
nb_val=[]
model_name=[]

def get_info(model): #nn,cnn,svm,clf
    """Evaluate *model*, print its metrics, record them in the module-level
    per-model lists, and return the prediction frames.

    Returns:
        (val_result, test_result): DataFrames pairing actual labels
        (column 'L' of y_train / y_test) with the model's predictions,
        as columns val_actual/val_predict and test_actual/test_predict.

    NOTE(review): the "validation" metrics are computed on the training
    split (x_train / y_train) — no x_val is defined in this file; confirm
    intent with the preprocessor.
    """
    # The NN/CNN fits are commented out at module level, so look the names
    # up defensively: the original compared against the undefined names
    # directly and raised NameError on the first call.
    nn = globals().get('nn_clf')
    cnn = globals().get('cnn_clf')

    if nn is not None and model is nn:
        # Probabilistic output: threshold at 0.5 to get hard labels.
        # NOTE(review): x_val is undefined in this file — this branch would
        # fail if reached; kept for parity with the original code path.
        y_pred_on_val = model.predict(x_val) > 0.5
        y_pred_on_test = model.predict(x_test) > 0.5
    elif cnn is not None and model is cnn:
        # The CNN expects 4-D input: (samples, 1, n_features, 1).
        x_val_matrix = x_val.values
        x_val1 = x_val_matrix.reshape(x_val_matrix.shape[0], 1, len(x_val.columns), 1)
        y_pred_on_val = model.predict(x_val1) > 0.5
        x_test_matrix = x_test.values
        x_test1 = x_test_matrix.reshape(x_test_matrix.shape[0], 1, len(x_test.columns), 1)
        y_pred_on_test = model.predict(x_test1) > 0.5
    else:
        # Plain sklearn classifiers predict hard labels directly.
        y_pred_on_val = model.predict(x_train)
        y_pred_on_test = model.predict(x_test)

    # Record the test-set metrics in the matching accumulator list; this
    # replaces the five near-identical if-blocks of the original.
    buckets = [(svm_clf, svm_val), (rf_clf, rf_val), (nb_clf, nb_val),
               (nn, nn_val), (cnn, cnn_val)]
    for clf, bucket in buckets:
        if clf is not None and model is clf:
            bucket.append([
                accuracy_score(y_test, y_pred_on_test),
                f1_score(y_test, y_pred_on_test),
                precision_score(y_test, y_pred_on_test),
                recall_score(y_test, y_pred_on_test),
                roc_auc_score(y_test, y_pred_on_test),
            ])
            break

    print('******', str(model), '******')
    print('||Validation Set||')
    print('Accuracy:', accuracy_score(y_train, y_pred_on_val))
    print('Avg Precision:', average_precision_score(y_train, y_pred_on_val))
    print('f1_score:', f1_score(y_train, y_pred_on_val))
    print('Precision:', precision_score(y_train, y_pred_on_val))
    print('Recall:', recall_score(y_train, y_pred_on_val))
    print('ROC_AUC:', roc_auc_score(y_train, y_pred_on_val))
    print("y: ", y_test)
    print("pred: ", y_pred_on_test)
    print('||Test Set||')
    # NOTE(review): the test "Accuracy" uses balanced_accuracy_score while
    # the validation one uses plain accuracy_score — confirm the asymmetry
    # is intentional.
    print('Accuracy:', balanced_accuracy_score(y_test, y_pred_on_test))
    print('Avg Precision:', average_precision_score(y_test, y_pred_on_test))
    print('f1_score:', f1_score(y_test, y_pred_on_test))
    print('Precision:', precision_score(y_test, y_pred_on_test))
    print('Recall:', recall_score(y_test, y_pred_on_test))
    print('ROC_AUC:', roc_auc_score(y_test, y_pred_on_test))

    # Pair the actual labels (column 'L') with the predictions so the
    # stacking step below can use them as features/targets.
    y_pred_on_val_df = pd.DataFrame(y_pred_on_val, columns=['defects1'])
    y_pred_on_test_df = pd.DataFrame(y_pred_on_test, columns=['defects1'])
    val_result = pd.concat([y_train['L'].reset_index(drop=True), y_pred_on_val_df['defects1']], axis=1)
    val_result = val_result.rename(columns={'L': 'val_actual', 'defects1': 'val_predict'})
    test_result = pd.concat([y_test['L'].reset_index(drop=True), y_pred_on_test_df['defects1']], axis=1)
    test_result = test_result.rename(columns={'L': 'test_actual', 'defects1': 'test_predict'})
    return val_result, test_result


# Evaluate each trained base classifier; keep the (val, test)
# actual-vs-predicted frames for the stacking step below.
svm_val_result, svm_test_result = get_info(svm_clf)
rf_val_result, rf_test_result = get_info(rf_clf)
# nn_val_result, nn_test_result = get_info(nn_clf)
# cnn_val_result, cnn_test_result = get_info(cnn_clf)
nb_val_result, nb_test_result = get_info(nb_clf)


# Assemble the meta-learner dataset: each base model's predictions become
# one feature column; the actual labels are the target.
#,nn_val_result['val_predict'],cnn_val_result['val_predict']
new_val_set_x = pd.concat([svm_val_result['val_predict'],rf_val_result['val_predict'],nb_val_result['val_predict']],axis=1)
new_val_set_x_matrix = new_val_set_x.values
new_val_set_y_matrix = svm_val_result['val_actual'].values

new_test_set_x = pd.concat([svm_test_result['test_predict'],rf_test_result['test_predict'],nb_test_result['test_predict']],axis=1)
new_test_set_x_matrix = new_test_set_x.values
new_test_set_y_matrix = svm_test_result['test_actual'].values

def send_classifiers_to_LR_file():
    """Expose the trained base classifiers to the logistic-regression module.

    Returns:
        A 5-tuple (svm, rf, nb, nn, cnn).  The NN/CNN fits are commented
        out at module level, so those slots are None rather than raising
        NameError, which the original did by referencing the undefined
        nn_clf / cnn_clf names.
    """
    return (svm_clf, rf_clf, nb_clf,
            globals().get('nn_clf'), globals().get('cnn_clf'))

from sklearn.linear_model import LogisticRegression
from mlxtend.classifier import StackingClassifier

def send_results_to_logistic_regression():
    """Fit the stacking meta-learner on the base models' predictions.

    Returns:
        (clf, new_test_set_x_matrix): the fitted StackingClassifier and the
        test-set feature matrix, so the caller can score the held-out split.
    """
    # The original first built a bare LogisticRegression(random_state=0)
    # and immediately overwrote it; that dead assignment is removed.
    clf = StackingClassifier(
        classifiers=[rf_clf, svm_clf, nb_clf],
        # use_probas=True would feed class probabilities to the meta-classifier;
        # average_probas controls whether per-class probabilities are averaged.
        meta_classifier=LogisticRegression(),
    )
    clf.fit(new_val_set_x_matrix, new_val_set_y_matrix)
    return clf, new_test_set_x_matrix


# Plotting: radar chart of per-model metrics (currently disabled)
# from pyecharts.charts import Radar
# schema=[
#     {"name":"准确率","max":1,"min":0},
#     {"name":"F1值","max":1,"min":0},
#     {"name":"精确率","max":1,"min":0},
#     {"name":"召回率","max":1,"min":0},
#     {"name":"AUC","max":1,"min":0}
# ]
# print(svm_val)
# radar=Radar()
# radar.add_schema(schema=schema,shape="polygon")
# radar.add("SVM",svm_val,color="red")
# radar.add("NN",nn_val,color="blue")
# radar.add("CNN",cnn_val,color="yellow")
# radar.add("NB",nb_val,color="green")
# radar.add("RF",rf_val,color="pink")
# radar.render("Radar.html")
