import numpy as np
import pandas as pd
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, precision_score, roc_auc_score, recall_score, confusion_matrix

# Load the preprocessed Titanic data set ONCE and split it into features/label.
# (The original read the same CSV twice — once for x, once for y.)
# NOTE: raw string keeps the same path value while avoiding the invalid
# "\分" escape sequence (DeprecationWarning on modern Python).
DATA_PATH = r"D:\分类实验2次\new_titanic.csv"
FEATURE_COLS = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']

data = pd.read_csv(DATA_PATH, usecols=FEATURE_COLS + ['Survived'])
# Dropping the label column preserves the original file order of the
# feature columns (usecols selects in file order, not list order).
x = data.drop(columns=['Survived']).to_numpy()
y = data['Survived'].to_numpy()  # already 1-D, no flatten() needed
# print(x.shape)
# print(y.shape)
kfold = KFold(n_splits=10, random_state=None, shuffle=False)

# Per-fold metric accumulators (mean/std reported after the loop).
accuracy1 = []
precision1 = []
auc1 = []
sensitivity1 = []
specificity1 = []

# 10-fold cross-validation: for each fold, train on the TRAINING indices
# only and evaluate on the held-out test indices.
for train, test in kfold.split(x, y):
    rf = RandomForestClassifier()
    # BUG FIX: the original called rf.fit(x, y) — training on the FULL data
    # set leaks the test fold into the model and inflates every metric.
    rf.fit(x[train], y[train])
    y_pred = rf.predict(x[test])
    y_pred_pro = rf.predict_proba(x[test])

    # Confusion matrix: rows are true classes, columns are predicted classes.
    confusion = confusion_matrix(y[test], y_pred)
    TP = confusion[1, 1]
    TN = confusion[0, 0]
    FP = confusion[0, 1]
    FN = confusion[1, 0]

    accuracy = accuracy_score(y[test], y_pred)
    precision = precision_score(y[test], y_pred)
    sensitivity = recall_score(y[test], y_pred)  # sensitivity == recall
    # Specificity (true-negative rate) has no direct sklearn scorer,
    # so it is derived from the confusion matrix.
    specificity = TN / (FP + TN)

    # Column 1 of predict_proba is P(class == 1), the score roc_auc_score
    # expects for binary classification.
    auc = roc_auc_score(y[test], y_pred_pro[:, 1])

    accuracy1.append(accuracy)
    precision1.append(precision)
    sensitivity1.append(sensitivity)
    specificity1.append(specificity)
    auc1.append(auc)

# Report mean +/- standard deviation of each metric across the 10 folds.
# (Typos fixed in the output strings: "aveage" -> "average", "flod" -> "fold".)
print("\nRandomForestClassifier average accuracy over 10-fold cross validation: %.6f (+/- %.6f)" % (np.mean(accuracy1), np.std(accuracy1)))
print("\nRandomForestClassifier average precision over 10-fold cross validation: %.6f (+/- %.6f)" % (np.mean(precision1), np.std(precision1)))
print("\nRandomForestClassifier average auc over 10-fold cross validation: %.5f (+/- %.5f)" % (np.mean(auc1), np.std(auc1)))
print("\nRandomForestClassifier average sensitivity over 10-fold cross validation: %.6f (+/- %.6f)" % (np.mean(sensitivity1), np.std(sensitivity1)))
print("\nRandomForestClassifier average specificity over 10-fold cross validation: %.6f (+/- %.6f)" % (np.mean(specificity1), np.std(specificity1)))
