import pandas as pd
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.feature_selection import RFE
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from scipy.stats import ttest_ind, levene
from sklearn.linear_model import LassoCV
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.metrics import roc_curve, roc_auc_score, classification_report, accuracy_score  # ROC 曲线 AUC 分类报告
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV, RepeatedKFold, cross_val_score, StratifiedKFold, RandomizedSearchCV
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from scipy.stats import randint
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, roc_auc_score


# Load the feature table and labels from Excel.
# FIX: raw string avoids the invalid "\P" escape sequence (SyntaxWarning on
# Python 3.12+, slated to become an error); the path bytes are unchanged.
data = pd.read_excel(r'C:/Users/14053\PycharmProjects/model train/vgg16ronhe.xlsx')
x = data[data.columns[2:]]  # feature columns start at index 2
y = data['label']           # class label column
#x.columns = x.columns.astype(str)
# Standardize, then rescale to [0, 1].
# NOTE(review): MinMaxScaler applied after StandardScaler makes the z-scoring
# redundant for the final [0, 1] range; kept to preserve the original pipeline.
x = StandardScaler().fit_transform(x)
x = MinMaxScaler().fit_transform(x)
x = pd.DataFrame(x)
print(x.shape)

# Recursive feature elimination: keep the 20 best features, dropping 20 per step.
estimator = RandomForestClassifier()
selector = RFE(estimator=estimator, n_features_to_select=20, step=20)
x = selector.fit_transform(x, y)
# Indices of the 20 selected features (relative to the feature matrix).
print(selector.get_support(indices=True))

# Reduced feature matrix (now an ndarray with 20 columns).
print(x)

# Column names of the selected features.
# BUG FIX: the feature matrix was built from data.columns[2:] (see the load
# step), so the boolean support mask must index that same slice;
# data.columns[:-1] has a different length and raises
# "boolean index has wrong length".
selected_columns = data.columns[2:][selector.support_]


# PCA on the RFE-selected features.
model_pca = PCA(n_components=20)
# FIX: fit once and transform in the same call; the original called fit()
# and then fit_transform(), fitting the model twice on the same data.
x = model_pca.fit_transform(x)
print(model_pca.explained_variance_)
print(model_pca.explained_variance_ratio_)
print(x.shape)
# Convert the projected features back to a DataFrame.
x = pd.DataFrame(x)

# Save the selected/projected features to Excel.
# FIX: raw string avoids the invalid escape sequences ("\l", "\s") in the
# path; the path bytes are unchanged.
x.to_excel(r'E:\lung\model train\shengdu/vgg16_selecte.xlsx', index=False)


# --- Classifiers ---
# Random forest: fixed-parameter baseline, then a randomized hyper-parameter search.
print("随机森林：")
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=15)

# Baseline forest with hand-picked parameters.
model_rf = RandomForestClassifier(n_estimators=20,
                                  criterion='entropy',
                                  random_state=20,
                                  class_weight='balanced',
                                  ).fit(x_train, y_train)
score_rf = model_rf.score(x_test, y_test)
print(score_rf)

# Search space for the tuned forest.
param_dist = {
    'n_estimators': randint(10, 100),  # random integer in [10, 100)
    'criterion': ['gini', 'entropy'],
    'max_depth': [None, 5, 10, 20],
    'min_samples_split': randint(2, 20),
    'min_samples_leaf': randint(1, 10),
    'max_features': ['sqrt', 'log2', None]
}
rf = RandomForestClassifier(random_state=20, class_weight='balanced')
# Randomized search over the space above.
# FIX: random_state added so the sampled candidates are reproducible,
# consistent with the fixed seeds used everywhere else in this script.
model_rf = RandomizedSearchCV(rf, param_distributions=param_dist, n_iter=10, cv=5,
                              random_state=20)
model_rf.fit(x_train, y_train)
score_rf = model_rf.score(x_test, y_test)
print(score_rf)
# Report the best parameter combination and its cross-validated score.
print("最佳参数组合：", model_rf.best_params_)
print("最佳得分：", model_rf.best_score_)




# Disabled experiment: repeated 5-fold cross-validation of the random forest.
# Kept verbatim as dead code inside a bare string literal (never executed).
"""
rkf=RepeatedKFold(n_splits=5,n_repeats=1)
for train_index, test_index in rkf.split(x):
    x_train=x.iloc[train_index]
    x_test = x.iloc[test_index]
    y_train = y.iloc[train_index]
    y_test = y.iloc[test_index]
    model_rf = RandomForestClassifier(n_estimators=20
                                  , criterion='entropy'
                                  , random_state=20
                                  , class_weight='balanced'
                                  ).fit(x_train,y_train)
    score_rf=model_rf.score(x_test,y_test)
    print(score_rf)
"""


# Disabled experiment: random-forest ROC curve and AUC.
# Kept verbatim as dead code inside a bare string literal (never executed).
"""
#随机森林roc曲线
y_test_probs = model_rf.predict_proba(x_test)  # 获取测试集预测结果的预测概率
# 阈值 thresholds=1 时的 FPR、TPR
fpr, tpr, thresholds = roc_curve(y_test, y_test_probs[:,1], pos_label = 1)
# y_test 测试集的 y 值
# y_test_probs[:,1] 测试集预测结果为 1 的概率
# pos_label = 1  预测结果=1 的为阳性
plt.figure()
plt.plot(fpr, tpr, marker = 'o')
plt.xlabel("1 - Specificity")  # 特异度 FPR 假阳性率
plt.ylabel("Sensitivity")  # 灵敏度 TPR 真阳性率
plt.show()
# 通过测试集的实际值 y_test 和预测值计算 AUC
auc_score = roc_auc_score(y_test,model_rf.predict(x_test))
print(auc_score)
"""

# --- Support vector machine ---
# Grid-search C and gamma for an RBF-kernel SVM, then refit the best model.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=15)
Cs = np.logspace(-1, 3, 10, base=2)       # 10 candidate C values, 2**-1 .. 2**3
gammas = np.logspace(-4, 1, 50, base=2)   # 50 candidate gamma values, 2**-4 .. 2**1
param_grid = dict(C=Cs, gamma=gammas)
grid = GridSearchCV(svm.SVC(kernel='rbf'), param_grid=param_grid, cv=10).fit(x_train, y_train)
print(grid.best_params_)
C = grid.best_params_['C']
gamma = grid.best_params_['gamma']
# Refit with probability=True so predict_proba is available for soft voting
# and ROC later.
# FIX: random_state added — probability=True runs an internal Platt-scaling
# cross-validation whose seed was previously unset, making probabilities
# non-reproducible; consistent with the fixed seeds used elsewhere.
model_svm = svm.SVC(kernel='rbf', C=C, gamma=gamma, probability=True,
                    random_state=20).fit(x_train, y_train)
score_svm = model_svm.score(x_test, y_test)
print("向量机：")
print(score_svm)

"""
#5折交叉验证
rkf = RepeatedKFold(n_splits=5,n_repeats=1)
print("支持向量机AUC：")


for train_index, test_index in rkf.split(x):
    x_train=x.iloc[train_index]
    x_test = x.iloc[test_index]
    y_train = y.iloc[train_index]
    y_test = y.iloc[test_index]
    model_svm=svm.SVC(kernel='rbf',C=C,gamma=gamma,probability=True).fit(x_train,y_train)
    score_svm=model_svm.score(x_test,y_test)
    print(score_svm)
"""

#k近邻

#不使用交叉验证
"""
knn_clf = KNeighborsClassifier(n_neighbors=5)
# 训练模型
knn_clf.fit(x_train, y_train)
# 测试模型的准确率
score_knn = knn_clf.score(x_test, y_test)
print(score_knn)#0.9256198347107438
"""
#k近邻
knn_clf = KNeighborsClassifier(n_neighbors=5)
knn_clf.fit(x_train, y_train)
score_knn = knn_clf.score(x_test, y_test)
#score_knn = knn_clf.score(x_train, y_train)
print("k近邻：")
print(score_knn)
"""
#5折交叉验证
for train_index, test_index in rkf.split(x):
    x_train=x.iloc[train_index]
    x_test = x.iloc[test_index]
    y_train = y.iloc[train_index]
    y_test = y.iloc[test_index]
    knn_clf = KNeighborsClassifier(n_neighbors=5)
    knn_clf.fit(x_train, y_train)
    score_knn = knn_clf.score(x_test, y_test)
    print(score_knn)
"""

# --- Model ensemble: soft voting over KNN, SVM, and the tuned random forest ---
voting_clf = VotingClassifier(estimators=[('knn', knn_clf), ('svc', model_svm), ('rf', model_rf)],
                              voting='soft')
# Fit each individual model plus the ensemble and report test accuracy.
# NOTE(review): refitting model_rf (a RandomizedSearchCV) re-runs the whole
# search; consider passing model_rf.best_estimator_ here instead.
for clf in (knn_clf, model_svm, model_rf, voting_clf):
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    # FIX: accuracy_score's signature is (y_true, y_pred); the original order
    # was swapped — numerically identical for accuracy, fixed for convention.
    print(clf.__class__.__name__, accuracy_score(y_test, y_pred))

# Positive-class probability on the test set for each model.
y_test_probs_rf = model_rf.predict_proba(x_test)[:, 1]          # random forest
y_test_probs_svm = model_svm.predict_proba(x_test)[:, 1]        # SVM
y_test_probs_knn = knn_clf.predict_proba(x_test)[:, 1]          # KNN
y_test_probs_voting = voting_clf.predict_proba(x_test)[:, 1]    # soft-voting ensemble

# ROC curve points (FPR, TPR) per model.
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_test_probs_rf, pos_label=1)
fpr_svm, tpr_svm, _ = roc_curve(y_test, y_test_probs_svm, pos_label=1)
fpr_knn, tpr_knn, _ = roc_curve(y_test, y_test_probs_knn, pos_label=1)
fpr_voting, tpr_voting, _ = roc_curve(y_test, y_test_probs_voting, pos_label=1)

# AUC per model.
auc_rf = roc_auc_score(y_test, y_test_probs_rf)
auc_svm = roc_auc_score(y_test, y_test_probs_svm)
auc_knn = roc_auc_score(y_test, y_test_probs_knn)
auc_voting = roc_auc_score(y_test, y_test_probs_voting)

# Plot all four ROC curves on one figure with AUC in the legend.
plt.figure()
plt.plot(fpr_rf, tpr_rf, label='RF (AUC = %0.2f)' % auc_rf)
plt.plot(fpr_svm, tpr_svm, label='SVM (AUC = %0.2f)' % auc_svm)
plt.plot(fpr_knn, tpr_knn, label='KNN (AUC = %0.2f)' % auc_knn)
plt.plot(fpr_voting, tpr_voting, label='ME (AUC = %0.2f)' % auc_voting)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc='lower right')
plt.show()


