import pandas as pd
import pandas as pd
import numpy as np
from matplotlib.ticker import MultipleLocator
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from scipy.stats import ttest_ind, levene
from sklearn.linear_model import LassoCV
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.metrics import roc_curve, roc_auc_score, classification_report, accuracy_score  # ROC 曲线 AUC 分类报告
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV, RepeatedKFold, cross_val_score, StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error








# Load the extracted VGG16 feature table.
# Raw string for the Windows path: in the original, '\l' and '\m' were
# invalid escape sequences (SyntaxWarning on modern Python); the raw
# string has the identical runtime value.
data = pd.read_excel(r'E:\lung\model train/vgg16-features.xlsx')
x = data[data.columns[2:]]  # feature columns start at the 3rd column
y = data['label']           # class label column
# Standardize, then rescale to [0, 1]; the MinMax pass determines the final range.
x = StandardScaler().fit_transform(x)
x = MinMaxScaler().fit_transform(x)
# Back to a DataFrame so .iloc indexing works in the CV loops below.
x = pd.DataFrame(x)
print(x.shape)








# --- Classifiers ---
# Random forest: hold-out evaluation on a 70/30 split.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=15)
model_rf = RandomForestClassifier(n_estimators=20,
                                  criterion='entropy',
                                  random_state=20,
                                  class_weight='balanced',
                                  ).fit(x_train, y_train)
# .score() returns mean accuracy, not AUC — the original print label
# ("随机森林AUC") mislabeled this value.
score_rf = model_rf.score(x_test, y_test)
print("随机森林准确率：")
print(score_rf)  # 0.9752066115702479

# Disabled exploratory code: ROC curve for the random forest.
# NOTE(review): the roc_auc_score call below feeds hard class predictions;
# for a proper AUC use model_rf.predict_proba(x_test)[:, 1] instead.
# y_test_probs = model_rf.predict_proba(x_test)  # predicted probabilities on the test set
# fpr, tpr, thresholds = roc_curve(y_test, y_test_probs[:, 1], pos_label=1)
# # y_test: ground-truth labels of the test set
# # y_test_probs[:, 1]: predicted probability of class 1
# # pos_label=1: class 1 is treated as positive
# plt.figure()
# plt.plot(fpr, tpr, marker='o')
# plt.xlabel("1 - Specificity")  # false positive rate
# plt.ylabel("Sensitivity")      # true positive rate
# plt.show()
# auc_score = roc_auc_score(y_test, model_rf.predict(x_test))
# print(auc_score)
# Support vector machine.
# Hyper-parameter search for an RBF kernel: C and gamma on log2 grids.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=15)
Cs = np.logspace(-1, 3, 10, base=2)
gammas = np.logspace(-4, 1, 50, base=2)
param_grid = dict(C=Cs, gamma=gammas)
grid = GridSearchCV(svm.SVC(kernel='rbf'), param_grid=param_grid, cv=10).fit(x_train, y_train)
print(grid.best_params_)
C = grid.best_params_['C']
gamma = grid.best_params_['gamma']

# Plain hold-out evaluation (kept for reference, disabled):
# model_svm = svm.SVC(kernel='rbf', C=C, gamma=gamma, probability=True).fit(x_train, y_train)
# score_svm = model_svm.score(x_test, y_test)
# print(score_svm)  # 0.9256198347107438

# One repeat of 5-fold cross-validation.
# (The original comment said "2x3-fold", which contradicted
# n_splits=5, n_repeats=1.)
rkf = RepeatedKFold(n_splits=5, n_repeats=1)
# .score() returns mean accuracy, not AUC — the original label was misleading.
print("支持向量机准确率：")
for train_index, test_index in rkf.split(x):
    x_train = x.iloc[train_index]
    x_test = x.iloc[test_index]
    y_train = y.iloc[train_index]
    y_test = y.iloc[test_index]
    model_svm = svm.SVC(kernel='rbf', C=C, gamma=gamma, probability=True).fit(x_train, y_train)
    score_svm = model_svm.score(x_test, y_test)
    print(score_svm)
# 0.9012345679012346, 0.95, 0.925, 0.975, 0.9375

# k-nearest neighbours.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=15)
# Hold-out evaluation (no cross-validation).
knn_clf = KNeighborsClassifier(n_neighbors=5)
knn_clf.fit(x_train, y_train)
# .score() returns mean accuracy on the hold-out set.
score_knn = knn_clf.score(x_test, y_test)
print(score_knn)  # 0.9256198347107438
# (A stray empty triple-quoted string left over from toggling code was
# removed here; it was a no-op expression statement.)

# One repeat of 5-fold cross-validation.
rkf = RepeatedKFold(n_splits=5, n_repeats=1)
# .score() is accuracy, not AUC — the original label was misleading.
print("K近邻准确率：")
for train_index, test_index in rkf.split(x):
    x_train = x.iloc[train_index]
    x_test = x.iloc[test_index]
    y_train = y.iloc[train_index]
    y_test = y.iloc[test_index]
    knn_clf = KNeighborsClassifier(n_neighbors=5)
    knn_clf.fit(x_train, y_train)
    score_knn = knn_clf.score(x_test, y_test)
    print(score_knn)
# 0.9256198347107438, 0.8283582089552238, 0.9029850746268657, 0.9022556390977443

# Model ensembling: soft voting over the three classifiers.
# VotingClassifier.fit clones and refits each base estimator, so it does
# not matter that knn_clf / model_svm / model_rf were last fitted inside
# the CV loops above.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=15)
voting_clf = VotingClassifier(estimators=[('knn', knn_clf), ('svc', model_svm), ('rf', model_rf)],
                              voting='soft')
for clf in (knn_clf, model_svm, model_rf, voting_clf):
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    # accuracy_score expects (y_true, y_pred); the original swapped them —
    # harmless for accuracy (symmetric) but wrong for most other metrics.
    print(clf.__class__.__name__, accuracy_score(y_test, y_pred))


