from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.feature_selection import RFE
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from scipy.stats import ttest_ind, levene
from sklearn.linear_model import LassoCV
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.metrics import roc_curve, roc_auc_score, classification_report, accuracy_score  # ROC 曲线 AUC 分类报告
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV, RepeatedKFold, cross_val_score, StratifiedKFold, RandomizedSearchCV
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier

# Load the feature spreadsheet: features start at column 2, target is 'label'.
# Use a raw string so the backslash in the Windows path is not parsed as an
# escape sequence (the original "\l" is an invalid escape and raises a
# SyntaxWarning on modern Python); the resulting path string is unchanged.
data = pd.read_excel(r'E:\lung\model train/vgg16ronhe.xlsx')
x = data[data.columns[2:]]
y = data['label']
# NOTE(review): both scalers are fitted on the FULL data set before the
# train/test split below, which leaks test-set statistics into training.
# Kept as-is to preserve current results; consider fitting on x_train only.
x = StandardScaler().fit_transform(x)   # z-score each feature
x = MinMaxScaler().fit_transform(x)     # then rescale each feature to [0, 1]
x = pd.DataFrame(x)
print(x.shape)
# Hold out 30% of the samples for testing (fixed seed for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.3, random_state=15)

# Random-forest baseline: entropy split criterion, class weights balanced
# against label frequency, fixed seed so results are repeatable.
model_rf = RandomForestClassifier(
    n_estimators=20,
    criterion='entropy',
    random_state=20,
    class_weight='balanced',
)
model_rf.fit(x_train, y_train)
# SVM with an RBF kernel: search C and gamma over log2-spaced grids using
# 10-fold cross-validation, then refit a probability-enabled model with the
# best hyper-parameters found.
c_grid = np.logspace(-1, 3, 10, base=2)
gamma_grid = np.logspace(-4, 1, 50, base=2)
search = GridSearchCV(
    svm.SVC(kernel='rbf'),
    param_grid={'C': c_grid, 'gamma': gamma_grid},
    cv=10,
)
search.fit(x_train, y_train)
print(search.best_params_)
best = search.best_params_
model_svm = svm.SVC(kernel='rbf', C=best['C'], gamma=best['gamma'],
                    probability=True)
model_svm.fit(x_train, y_train)

# k-nearest-neighbours baseline (k = 5, default distance metric).
knn_clf = KNeighborsClassifier(n_neighbors=5)
knn_clf.fit(x_train, y_train)


# Model ensembling: combine the three base models with soft (probability
# averaged) voting.
voting_clf = VotingClassifier(
    estimators=[('knn', knn_clf), ('svc', model_svm), ('rf', model_rf)],
    voting='soft',
)

# Fit every model and report its held-out test accuracy. Refitting the base
# models here reproduces the estimators trained above (same data, fixed
# seeds). NOTE: `clf` intentionally leaves the loop bound to voting_clf —
# the ROC section below reads its probabilities.
for clf in (knn_clf, model_svm, model_rf, voting_clf):
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    # accuracy_score is symmetric, but y_true conventionally comes first.
    print(clf.__class__.__name__, accuracy_score(y_test, y_pred))



# --- ROC analysis on the held-out test set --------------------------------
# Positive-class probability for each fitted model.
rf_scores = model_rf.predict_proba(x_test)[:, 1]
svm_scores = model_svm.predict_proba(x_test)[:, 1]
knn_scores = knn_clf.predict_proba(x_test)[:, 1]
# BUG FIX: the original used `clf.predict_proba(...)`, silently relying on
# `clf` being whatever the previous for-loop left behind. Reference the
# voting ensemble explicitly so the result cannot change if that loop does.
voting_scores = voting_clf.predict_proba(x_test)[:, 1]

# False/true positive rates and AUC for every model.
rf_fpr, rf_tpr, _ = roc_curve(y_test, rf_scores)
rf_auc = roc_auc_score(y_test, rf_scores)

svm_fpr, svm_tpr, _ = roc_curve(y_test, svm_scores)
svm_auc = roc_auc_score(y_test, svm_scores)

knn_fpr, knn_tpr, _ = roc_curve(y_test, knn_scores)
knn_auc = roc_auc_score(y_test, knn_scores)

fpr, tpr, _ = roc_curve(y_test, voting_scores)
ec_auc = roc_auc_score(y_test, voting_scores)

# Plot all four ROC curves against the chance diagonal.
plt.figure()
plt.plot(rf_fpr, rf_tpr, label='Random Forest (AUC = %0.2f)' % rf_auc)
plt.plot(svm_fpr, svm_tpr, label='SVM (AUC = %0.2f)' % svm_auc)
plt.plot(knn_fpr, knn_tpr, label='KNN (AUC = %0.2f)' % knn_auc)
plt.plot(fpr, tpr, label='voting (AUC = %0.2f)' % ec_auc)
plt.plot([0, 1], [0, 1], 'k--')  # chance line
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()