import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.ticker import MultipleLocator
from scipy.stats import levene, ttest_ind
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import Lasso, LassoCV
from sklearn.metrics import (accuracy_score, classification_report,
                             mean_squared_error, roc_auc_score, roc_curve)
from sklearn.model_selection import (GridSearchCV, RepeatedKFold,
                                     StratifiedKFold, cross_val_score,
                                     train_test_split)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler, StandardScaler


# --- Data loading for t-test based feature screening ---
# Load the two groups of pyradiomics features:
#   group 0 ("l-...") and group 1 ("e-...").
# FIX: the original plain string literals contained invalid escape
# sequences ('\l', '\m', '\p'); raw strings produce the identical path
# value but are warning-free on Python >= 3.12.
xlsx_0 = r'E:\lung\model train\pyradiomics/l-pyradiomics-features1.xlsx'
xlsx_1 = r'E:\lung\model train\pyradiomics/e-pyradiomics-features1.xlsx'
data_0 = pd.read_excel(xlsx_0)
data_1 = pd.read_excel(xlsx_1)
# Report the shape of each group.
print(data_0.shape, data_1.shape)

# Preprocessing: tag each group with a class label, then merge them.
rows_0, cols_0 = data_0.shape
rows_1, cols_1 = data_1.shape
# Insert the label column at position 0 (group 0 -> 0, group 1 -> 1).
data_0.insert(0, 'label', [0] * rows_0)
data_1.insert(0, 'label', [1] * rows_1)
# Merge both groups into a single frame.
data = pd.concat([data_0, data_1])

# Split features / target. Column 0 is 'label'; column 1 is assumed to be
# a non-feature (ID / image-path) column emitted by pyradiomics -- TODO
# confirm against the Excel layout.
x = data[data.columns[2:]]
y = data['label']
# Optional scaling (kept disabled, as in the original):
#x = StandardScaler().fit_transform(x)
#x = MinMaxScaler().fit_transform(x)
x = pd.DataFrame(x)
# --- Univariate feature screening (independent-samples t-test) ---
# Keep a feature when the two groups differ significantly (p < 0.05).
# Levene's test decides whether equal variances may be assumed; when not,
# Welch's t-test (equal_var=False) is used instead.
index = []
for colName in data.columns[2:]:
    # Levene p > 0.05 -> variances can be treated as equal.
    if levene(data_0[colName], data_1[colName])[1] > 0.05:
        if ttest_ind(data_0[colName], data_1[colName])[1] < 0.05:
            index.append(colName)
    else:
        # Unequal variances: Welch's t-test.
        if ttest_ind(data_0[colName], data_1[colName], equal_var=False)[1] < 0.05:
            index.append(colName)
print(len(index))
#print(index)

# Keep the label column in front of the selected features.
if 'label' not in index:
    index = ['label'] + index
# Restrict every frame to the selected columns only.
data_0 = data_0[index]
data_1 = data_1[index]
data = pd.concat([data_0, data_1])
# BUG FIX: the filtered frame no longer contains the ID column, so the
# features start at column 1 (column 0 is 'label'). The original sliced
# from column 2 and silently discarded the first selected feature.
x = data[data.columns[1:]]
y = data['label']
colNames = x.columns
# z-score standardization of the selected features.
x = StandardScaler().fit_transform(x)
#x = MinMaxScaler().fit_transform(x)
x = pd.DataFrame(x)
x.columns = colNames
#print(index)


# --- LASSO feature selection ---
# Choose the regularization strength by 5-fold cross-validation over a
# log-spaced alpha grid, then keep the non-zero coefficients.
alphas = np.logspace(-5.5, -2, 50)
# FIX: `normalize=True` was removed from LassoCV in scikit-learn 1.2 and
# crashes on current versions. The features are already z-scored by the
# StandardScaler step above, so no standardization is lost by dropping it.
model_lassoCV = LassoCV(alphas=alphas, cv=5, max_iter=100000).fit(x, y)
coef = pd.Series(model_lassoCV.coef_, index=x.columns)

print(model_lassoCV.alpha_)
print('%s %d' % ('Lasso picked', sum(coef != 0)))

# Keep (at most) the 12 non-zero coefficients with the largest values.
x_lasso = coef[coef != 0].nlargest(12).index

print(coef[coef != 0].nlargest(12))
# Save the label column together with the selected features to Excel.
# reset_index is required: the concat above left duplicate row indices.
data.reset_index(drop=True, inplace=True)
selected_data = pd.concat([pd.Series(data.iloc[:, 0]), x[x_lasso]], axis=1)
selected_data.to_excel('pyradiomics_selected_features1.xlsx', index=False)


"""
#特征权重展示图
x_values = coef[coef != 0].nlargest(12).index

y_values = coef[coef != 0].nlargest(12).values

plt.bar(x_values, y_values
        , color = 'lightblue'
        , edgecolor = 'black'
        , alpha = 0.8
        ,label='features weight'
        )
plt.xticks(x_values
           ,rotation='30'
           ,ha='right'
           ,va='top'
           ,fontsize=7)
plt.legend(loc='lower left',fontsize=7)
plt.ylabel("weight",fontsize=7)
plt.show()

MSEs=model_lassoCV.mse_path_
MSEs_mean=np.apply_along_axis(np.mean,1,MSEs)
MSEs_std=np.apply_along_axis(np.std,1,MSEs)
plt.figure()
plt.errorbar(model_lassoCV.alphas_,MSEs_mean
             , yerr=MSEs_std
             ,fmt="o"
             ,ms=3
             ,mfc="r"
             ,mec="r"
             ,ecolor="lightblue"
             ,elinewidth=2
             ,capsize=4
             ,capthick=1)
plt.semilogx()
plt.axvline(model_lassoCV.alpha_,color='black',ls="--")
plt.xlabel('alpha')
plt.ylabel('Mean Square error')
ax=plt.gca()
y_major_locator=MultipleLocator(0.05)
ax.yaxis.set_major_locator(y_major_locator)
plt.show()
"""
"""

#分类器
#随机森林

x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3, random_state = 15)
model_rf = RandomForestClassifier(n_estimators=20
                                  , criterion='entropy'
                                  , random_state=20
                                  , class_weight='balanced'
                                  ).fit(x_train,y_train)
score_rf=model_rf.score(x_test,y_test)
print("随机森林AUC：")
print(score_rf)#0.9752066115702479

"""
"""
#随机森林roc曲线
y_test_probs = model_rf.predict_proba(x_test)  # 获取测试集预测结果的预测概率
# 阈值 thresholds=1 时的 FPR、TPR
fpr, tpr, thresholds = roc_curve(y_test, y_test_probs[:,1], pos_label = 1)
# y_test 测试集的 y 值
# y_test_probs[:,1] 测试集预测结果为 1 的概率
# pos_label = 1  预测结果=1 的为阳性
plt.figure()
plt.plot(fpr, tpr, marker = 'o')
plt.xlabel("1 - Specificity")  # 特异度 FPR 假阳性率
plt.ylabel("Sensitivity")  # 灵敏度 TPR 真阳性率
plt.show()
# 通过测试集的实际值 y_test 和预测值计算 AUC
auc_score = roc_auc_score(y_test,model_rf.predict(x_test))
print(auc_score)
"""
"""
#支持向量机
#模型调参
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3, random_state = 15)
Cs = np.logspace(-1,3,10,base=2)
gammas = np.logspace(-4, 1, 50,base=2)
param_grid = dict(C=Cs,gamma=gammas)
grid = GridSearchCV(svm.SVC(kernel='rbf'),param_grid=param_grid,cv=10).fit(x_train,y_train)
print(grid.best_params_)
C = grid.best_params_['C']
gamma = grid.best_params_['gamma']

#开始训练验证（不使用交叉验证）
#model_svm=svm.SVC(kernel='rbf',C=C,gamma=gamma,probability=True).fit(x_train,y_train)
#score_svm=model_svm.score(x_test,y_test)
#print(score_svm)#0.9256198347107438
#2次3折交叉验证
rkf = RepeatedKFold(n_splits=5,n_repeats=1)
print("支持向量机AUC：")
for train_index, test_index in rkf.split(x):
    x_train=x.iloc[train_index]
    x_test = x.iloc[test_index]
    y_train = y.iloc[train_index]
    y_test = y.iloc[test_index]
    model_svm=svm.SVC(kernel='rbf',C=C,gamma=gamma,probability=True).fit(x_train,y_train)
    score_svm=model_svm.score(x_test,y_test)
    print(score_svm)
#0.9012345679012346,0.95,0.925,0.975,0.9375

#k近邻
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3, random_state = 15)
#不使用交叉验证
"""
"""
knn_clf = KNeighborsClassifier(n_neighbors=5)
# 训练模型
knn_clf.fit(x_train, y_train)
# 测试模型的准确率
score_knn = knn_clf.score(x_test, y_test)
print(score_knn)#0.9256198347107438
"""
"""
#5折交叉验证
rkf=RepeatedKFold(n_splits=5,n_repeats=1)
print("K近邻AUC：")
for train_index, test_index in rkf.split(x):
    x_train=x.iloc[train_index]
    x_test = x.iloc[test_index]
    y_train = y.iloc[train_index]
    y_test = y.iloc[test_index]
    knn_clf = KNeighborsClassifier(n_neighbors=5)
    knn_clf.fit(x_train, y_train)
    score_knn = knn_clf.score(x_test, y_test)
    print(score_knn) 
# 0.9256198347107438，0.8283582089552238，0.9029850746268657，0.9022556390977443

#模型集成
#soft voting
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3, random_state = 15)
voting_clf = VotingClassifier(estimators=[('knn',knn_clf),('svc',model_svm),('rf',model_rf)],
                             voting='soft')
for clf in (knn_clf, model_svm, model_rf, voting_clf):
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    print(clf.__class__.__name__,accuracy_score(y_pred, y_test))


"""