#!/usr/bin/env python
# coding: utf-8

# # 导入包

# In[316]:

import sys
import pandas as pd
import numpy as np
from collections import Counter

from IPython import get_ipython
from IPython.core.display_functions import display
from scipy.constants import alpha
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LassoCV
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from matplotlib import cm
import seaborn as sns
from scipy.stats import mannwhitneyu, fisher_exact
from sklearn.metrics import classification_report, RocCurveDisplay
from imblearn.over_sampling import SMOTE

# ### 导入数据并查看数据基本情况

# In[2]:


# Command-line arguments: input spreadsheet path, user name and sub-directory
# used to build the output paths of the generated figures.
dataPath = sys.argv[1]
user = sys.argv[2]
filePath = sys.argv[3]
data = pd.read_excel(dataPath)  # e.g. "C:\Users\lizemao\Desktop\FeatureExtraction"


# # Display options
# pd.set_option('display.max_rows', 0)  # show all rows
# pd.set_option('display.max_columns', 0)  # show all columns

# Descriptive statistics of the raw data
print(data.describe())

# ### Tidy the data and look at class balance

# In[3]:

# Turn off matplotlib interactive mode (figures are saved to disk, not shown)
plt.ioff()

# Outcome label: 0 / 1 prognosis
y = data['prognosis']
print(Counter(y))

# In[4]:


# Split rows by outcome to inspect group sizes
data_a = data[y == 0]
data_b = data[y == 1]
print(data_a.shape, data_b.shape)

# ### Can the initial clinical score predict prognosis on its own?

# In[5]:

# Figure 1 ############################################################
# Scatter of the third column (presumably the initial clinical score --
# TODO confirm against the spreadsheet layout) against prognosis.
plt.figure()
third_column_data = data.iloc[:, 2]
plt.scatter(third_column_data, y)  # prognosis
# NOTE(review): pictures are saved per user/filePath but the data dumps go to
# one shared fixed path -- confirm that concurrent users cannot clash.
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure1.png")
with open("BaseWeb/src/main/resources/static/data/figure1_data.txt", "w") as f:
    for a, y_val in zip(third_column_data, y):
        f.write(f"{a}, {y_val}\n")
#  plt.show()
plt.close()
# In[6]:

# Figure 2 ############################################################
# # no rad-score column
sns.violinplot(y=data['gender'], x=y)
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure2.png")
with open("BaseWeb/src/main/resources/static/data/figure2_data.txt", "w") as f:
    for gender, prog in zip(data['gender'], y):
        f.write(f"{gender}, {prog}\n")
#  plt.show()
plt.close()
# ### Does gender have a significant effect?

# Here gender 0 is female, 1 is male

# In[7]:


# Gender counts within the good-prognosis group.
# NOTE: bare expression -- the result is discarded when run as a script
# (notebook residue); wrap in print() if the output is wanted.
Counter(data[data['prognosis'] == 0]['gender'])

# In[8]:


# Cross-tabulation of prognosis vs gender (result discarded outside a notebook)
confusion_matrix(data['prognosis'], data['gender'])

# In[9]:


# Fisher's exact test on the prognosis/gender 2x2 table (result discarded)
fisher_exact(confusion_matrix(data['prognosis'], data['gender']))

# In[10]:


# Feature matrix: all columns except the first (presumably the prognosis /
# label column -- TODO confirm)
X = data.iloc[:, 1:]
X.head()

# ### Train / test split

# In[177]:


# 70/30 stratified split, fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=21, stratify=y)

# In[178]:


X_train_a = X_train[y_train == 0]
X_train_b = X_train[y_train == 1]

# In[179]:


X_train_a.shape, X_train_b.shape  # class 0 / class 1 sizes in the training set

# In[180]:


Counter(X_train[y_train == 0]['gender'])  # gender counts, training class 0

# In[181]:


Counter(X_train[y_train == 1]['gender'])  # gender counts, training class 1

# In[182]:


X_test_a = X_test[y_test == 0]
X_test_b = X_test[y_test == 1]

# In[183]:


X_test_a.shape, X_test_b.shape  # class 0 / class 1 sizes in the test set

# In[184]:


Counter(X_test[y_test == 0]['gender'])  # gender counts, test class 0

# In[185]:


Counter(X_test[y_test == 1]['gender'])  # gender counts, test class 1

# In[186]:


X_train.describe()  # training-set means / standard deviations (discarded)

# In[187]:


print(X_train)

# In[188]:


# resultPath =  'C:/Users/lizemao/Desktop/da/train.xlsx'
# X_train.to_excel(resultPath,sheet_name="train again",index=True,na_rep=0,inf_rep=0)


# In[189]:


print(X_test.describe())  # test-set means / standard deviations

# In[190]:


# resultPatha =  'C:/Users/lizemao/Desktop/da/test.xlsx'
# X_test.to_excel(resultPatha,sheet_name="test again",index=False,na_rep=0,inf_rep=0)


# ### Mann-Whitney U rank-sum test

# In[191]:


# Univariate filter: keep features whose distributions differ significantly
# (p < 0.05) between the two training-set outcome groups.
colNamesSel_mwU = []
for colName in X_train_a.columns[:]:
    if mannwhitneyu(X_train_a[colName], X_train_b[colName])[1] < 0.05:
        colNamesSel_mwU.append(colName)
print(len(colNamesSel_mwU))
print(colNamesSel_mwU)

# In[192]:


from sklearn.feature_selection import RFE
from sklearn.ensemble import RandomForestClassifier

# In[193]:


# Restrict train / test matrices to the filtered columns
X_train_mul = X_train[colNamesSel_mwU]
X_test_mul = X_test[colNamesSel_mwU]
X_train_mul

# ### Standardize the data

# In[194]:


# Fit the scaler on the training set only, then apply it to the test set
scaler = StandardScaler()
X_train_mul_scal = scaler.fit_transform(X_train_mul)
X_test_mul_scal = scaler.transform(X_test_mul)

# In[195]:


X_train_mul_scal = pd.DataFrame(X_train_mul_scal, columns=colNamesSel_mwU)
X_test_mul_scal = pd.DataFrame(X_test_mul_scal, columns=colNamesSel_mwU)
print(X_train_mul_scal)

# In[196]:


# Recursive feature elimination down to 8 features, ranked by a random forest
RFC = RandomForestClassifier(n_estimators=25, random_state=11)
selector_RFE = RFE(RFC, n_features_to_select=8, step=1).fit(X_train_mul_scal, y_train)

# In[197]:


selector_RFE.n_features_

# In[198]:


selector_RFE.ranking_

# In[199]:


selector_RFE.support_

# In[200]:


# Training matrix restricted to the RFE-selected columns (inspection only;
# the models below use the LASSO selection instead)
X_RFE = X_train_mul_scal[X_train_mul_scal.columns[selector_RFE.support_]]
X_RFE

# ### LASSO dimensionality reduction

# In[201]:


# Cross-validated LASSO over a log-spaced alpha grid; the features with
# non-zero coefficients form the final feature set.
alphas = np.logspace(-10, 1, 100, base=10)
selector_lasso = LassoCV(alphas=alphas, cv=10, max_iter=1000000)
selector_lasso.fit(X_train_mul_scal, y_train)
print(selector_lasso.alpha_)
values = selector_lasso.coef_[selector_lasso.coef_ != 0]
colNames_sel = X_train_mul_scal.columns[selector_lasso.coef_ != 0]
print(colNames_sel)
print(len(X_train_mul_scal.columns[selector_lasso.coef_ != 0]))

# In[202]:

# ####################### Figure 3 ####################################
# Bar chart of the non-zero LASSO coefficients.
# NOTE(review): `width` is assigned but never used below.
width = 0.45
plt.bar(colNames_sel, values
        , color='darkseagreen'  # turquoise
        , alpha=0.7
        , edgecolor='black'
        )
plt.xticks(np.arange(len(colNames_sel)), colNames_sel  # xticks
           , rotation=35
           , ha='right'
           , fontsize='small')
plt.ylabel("Feature Coefficient")  # ylabel
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure3.png")
with open("BaseWeb/src/main/resources/static/data/figure3_data.txt", "w") as f:
    for feature, coef in zip(colNames_sel, values):
        f.write(f"{feature}, {coef}\n")
#  plt.show()
plt.close()
# In[203]:

# Figure 4 ############################################################
# LASSO cross-validation curve: mean MSE (with std error bars) per alpha,
# with the chosen alpha marked by a dashed vertical line.
MSEs_mean = selector_lasso.mse_path_.mean(axis=1)
MSEs_std = selector_lasso.mse_path_.std(axis=1)
plt.figure()
plt.errorbar(selector_lasso.alphas_, MSEs_mean  # x, y data, paired one-to-one
             , yerr=MSEs_std  # y error range
             , fmt="o"  # marker for the data points
             , ms=3  # marker size
             , mfc="sienna"  # marker face colour
             , mec="sienna"  # marker edge colour
             , ecolor="darkseagreen"  # error-bar colour
             , elinewidth=2  # error-bar line width
             , capsize=4  # error-bar cap length
             , capthick=1)  # error-bar cap thickness
plt.semilogx()
plt.axvline(selector_lasso.alpha_, color='black', ls="--")
plt.xlim(1e-3, 10)
plt.xlabel('Lambda')
plt.ylabel('MSE')
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure4.png")
with open("BaseWeb/src/main/resources/static/data/figure4_data.txt", "w") as f:
    # BUG FIX: the loop variable was named `alpha`, silently rebinding the
    # module-level `alpha` imported from scipy.constants (used later by
    # delong_roc_ci) to the last lasso alpha. Renamed to `alpha_val`.
    for alpha_val, mse_mean, mse_std in zip(selector_lasso.alphas_, MSEs_mean, MSEs_std):
        f.write(f"{alpha_val}, {mse_mean}, {mse_std}\n")
#  plt.show()
plt.close()
# In[204]:

# Figure 5 ############################################################
# Full LASSO coefficient paths across the alpha grid
coefs = selector_lasso.path(X_train_mul_scal, y_train, alphas=alphas, max_iter=1e6
                            )[1].T
plt.figure()
plt.semilogx(selector_lasso.alphas_, coefs, '-')
plt.axvline(selector_lasso.alpha_, color='black', ls="--")
plt.xlim(1e-3, 10)
plt.ylim(-0.5, 0.5)
plt.xlabel('Lambda')
plt.ylabel('Coefficients')
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure5.png")
with open("BaseWeb/src/main/resources/static/data/figure5_data.txt", "w") as f:
    for coef in coefs:
        f.write(", ".join(map(str, coef)) + "\n")
#  plt.show()
plt.close()
# ### Compute the Rad-score

# In[205]:


selector_lasso.intercept_

# In[206]:


# Mean / std of the selected features, used to fold the standardization back
# into the coefficients so the Rad-score applies to raw feature values.
means_sel = scaler.mean_[selector_lasso.coef_ != 0]
means_sel

# In[207]:


stds_sel = scaler.scale_[selector_lasso.coef_ != 0]
stds_sel

# In[208]:


# Coefficients re-expressed in the raw (unscaled) feature space
values_trans = values / stds_sel
values_trans

# In[209]:


intercept_trans = -(values / stds_sel * means_sel).sum() + selector_lasso.intercept_
intercept_trans  # intercept in the raw feature space

# In[210]:


# Print each selected feature with its raw-space coefficient
for a, b in zip(colNames_sel, values_trans):
    print(a, b)

# In[211]:


# Rad-score of each test sample: linear combination of raw feature values
radScores_test = (X_test[colNames_sel] * values_trans).sum(axis=1) + intercept_trans
radScores_test

# In[212]:


# Standardized train/test matrices restricted to the LASSO-selected columns
X_train_mul_scal_lasso = X_train_mul_scal[X_train_mul_scal.columns[selector_lasso.coef_ != 0]]
X_test_mul_scal_lasso = X_test_mul_scal[X_test_mul_scal.columns[selector_lasso.coef_ != 0]]

# ### SVM classification

# In[213]:


# Grid search over C and gamma with 10-fold CV.
# NOTE(review): the grid search is fit on X_train_mul_scal (all filtered
# features) while the final SVC below is trained on the LASSO subset --
# confirm this mismatch is intentional.
Cs = np.logspace(-1, 3, 10, base=2)
gammas = np.logspace(-5, 1, 10)
param_grid = {
    'C': Cs
    , 'gamma': gammas
    #             , 'kernel': ('rbf','linear')
}
GS = GridSearchCV(SVC()
                  , param_grid=param_grid
                  , cv=10
                  )
GS.fit(X_train_mul_scal, y_train)
print(GS.best_params_)
C = GS.best_params_['C']
gamma = GS.best_params_['gamma']

# In[214]:


# SVC with the tuned hyper-parameters; probability=True enables predict_proba
svc = SVC(
    C=C
    , gamma=gamma
    #         , kernel=kernel
    , probability=True
)
svc.fit(X_train_mul_scal_lasso, y_train)  # X_train_mul_scal_lasso,y_train,X_test_mul_scal_lasso,y_test
print(svc.score(X_train_mul_scal_lasso, y_train))
print(svc.score(X_test_mul_scal_lasso, y_test))

# In[215]:


smo = SMOTE()  # synthetic oversampling to address class imbalance
X_train_mul_scal_lasso_smo, y_train_smo = smo.fit_resample(X_train_mul_scal_lasso, y_train)
svc = SVC(
    C=C
    , gamma=gamma
    #         , kernel=kernel
    , probability=True
)
svc.fit(X_train_mul_scal_lasso_smo, y_train_smo)
print(svc.score(X_train_mul_scal_lasso_smo, y_train_smo))  # training-set score
print(svc.score(X_test_mul_scal_lasso, y_test))  # test-set score

# In[216]:


# SVM probabilities / predictions on both sets (used by reports and ROC below)
y_test_prob = svc.predict_proba(X_test_mul_scal_lasso)[:, 1]
y_test_pred = svc.predict(X_test_mul_scal_lasso)
y_train_probsvm = svc.predict_proba(X_train_mul_scal_lasso)[:, 1]
y_train_predsvm = svc.predict(X_train_mul_scal_lasso)

# In[217]:


print(classification_report(y_test, y_test_pred))  # SVM

# In[218]:


# Quick ROC display (figure is neither saved nor closed here)
RocCurveDisplay.from_estimator(svc, X_test_mul_scal_lasso, y_test)
# plot_roc_curve(svc, X_train_mul_scal_lasso_smo,y_train_smo)


# In[219]:


import matplotlib.pyplot as plt
from sklearn import datasets, metrics, model_selection, svm
from sklearn.metrics import roc_curve  # roc_curve for the data dump below
# Figure 6 ############################################################
# SVM ROC curves: test set and SMOTE-balanced training set on one axis.
# FIX: the figure handle was named `f` and then clobbered by the file handle
# of the data dump; renamed to `fig` to keep the two distinct.
fig, ax = plt.subplots(dpi=60)
metrics.RocCurveDisplay.from_estimator(svc, X_test_mul_scal_lasso, y_test, ax=ax, name='test roc')
metrics.RocCurveDisplay.from_estimator(svc, X_train_mul_scal_lasso_smo, y_train_smo, ax=ax, name='train roc')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle=':', label='chance')
plt.title('ROC', fontsize=14)
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure6.png")
with open("BaseWeb/src/main/resources/static/data/figure6_data.txt", "w") as f:
    # dump the test-set ROC points only
    fpr, tpr, _ = roc_curve(y_test, y_test_prob)
    for fp, tp in zip(fpr, tpr):
        f.write(f"{fp}, {tp}\n")
#  plt.show()
# X_train_mul_scal_lasso_smo,y_train_smo
plt.close()

# In[ ]:


# In[220]:


from sklearn.linear_model import LogisticRegression

# Logistic-regression baseline on the LASSO-selected features
clf = LogisticRegression(random_state=0)  # instantiate a logistic-regression classifier
clf.fit(X_train_mul_scal_lasso, y_train)  # train

y_predlr = clf.predict(X_test_mul_scal_lasso)  # predict
display(y_predlr)

y_probalr = clf.predict_proba(X_test_mul_scal_lasso)
# display(y_proba)

import numpy as np
import pandas as pd


# Normalize to numpy arrays so the column-stacking below works whether the
# inputs are pandas Series or numpy arrays
y_test_np = y_test.to_numpy() if isinstance(y_test, pd.Series) else y_test
y_probalr_np = y_probalr.to_numpy() if isinstance(y_probalr, pd.Series) else y_probalr
# pd.DataFrame(
#     np.append(np.append(y_test[:, np.newaxis], y_predlr[:, np.newaxis], axis=1), y_probalr[:, 1][:, np.newaxis],
#               axis=1),
#     columns=['y_test', 'y_pred', 'y_proba'])
# Side-by-side table of truth / prediction / probability (bare expression;
# the result is discarded when run as a script)
pd.DataFrame(
    np.append(np.append(y_test_np[:, np.newaxis], y_predlr[:, np.newaxis], axis=1), y_probalr_np[:, 1][:, np.newaxis],
              axis=1),
    columns=['y_test', 'y_pred', 'y_proba'])
# In[221]:


import numpy as np  # test set
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt

# Logistic regression; random_state=42 makes every run reproducible.
classifier = LogisticRegression(random_state=42)
classifier.fit(X_train_mul_scal_lasso, y_train)

# Predicted probability of class 1 on the test set
# y_score = classifier.predict_proba(X_test_mul_scal_lasso)[:, 1]
y_test_problr = classifier.predict_proba(X_test_mul_scal_lasso)[:, 1]
# y_test_predlr = classifier.predict(X_test_mul_scal_lasso)

# ROC curve on the test set
fpr, tpr, _ = roc_curve(y_test, y_test_problr)
roc_auc = auc(fpr, tpr)



# Figure 7 ############################################################
# LR test-set ROC curve
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='BLUE', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='GRAY', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('LR_ROC')
plt.legend(loc="lower right")
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure7.png")
with open("BaseWeb/src/main/resources/static/data/figure7_data.txt", "w") as f:
    for fp, tp in zip(fpr, tpr):
        f.write(f"{fp}, {tp}\n")
#  plt.show()
plt.close()
# In[222]:


import numpy as np  # training set
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt

# Same LR model refit; this cell also produces training-set predictions used
# by the confusion matrices further below.
classifier = LogisticRegression(random_state=42)
classifier.fit(X_train_mul_scal_lasso, y_train)

# Predicted probabilities / labels on both sets
# y_score = classifier.predict_proba(X_test_mul_scal_lasso)[:, 1]
y_test_problr = classifier.predict_proba(X_test_mul_scal_lasso)[:, 1]
y_test_predlr = classifier.predict(X_test_mul_scal_lasso)
y_train_problr2 = classifier.predict_proba(X_train_mul_scal_lasso)[:, 1]
y_train_predlr2 = classifier.predict(X_train_mul_scal_lasso)

# ROC curve on the training set (figure 8 below is commented out)
fpr2, tpr2, _ = roc_curve(y_train, y_train_problr2)
roc_auc = auc(fpr2, tpr2)


# # 绘制第八张图 ############################################################
# # 绘制ROC曲线
# plt.figure()
# lw = 2
# plt.plot(fpr, tpr, color='BLUE', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
# plt.plot([0, 1], [0, 1], color='GRAY', lw=lw, linestyle='--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('LR_ROC')
# plt.legend(loc="lower right")
# plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure7.png")
# with open("BaseWeb/src/main/resources/static/data/figure7_data.txt", "w") as f:
#     for fp, tp in zip(fpr, tpr):
#         f.write(f"{fp}, {tp}\n")
# #  plt.show()
#plt.close()
# In[223]:


import xgboost as xgb

# In[224]:


from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt

# In[225]:


# XGBoost baseline on the LASSO-selected features
clf_XGB = XGBClassifier(use_label_encoder=False)  # X_train_mul_scal_lasso,y_train,X_test_mul_scal_lasso,y_test
clf_XGB.fit(X_train_mul_scal_lasso, y_train)  # X_train, y_train
score = clf_XGB.score(X_test_mul_scal_lasso, y_test)  # X_test, y_test
score

# In[226]:

# Figure 9 ############################################################
# Train / test accuracy as a function of the number of boosting rounds
scoreTrainList, scoreTestList = [], []
maxTreeNum = 100  # number of estimators to sweep
for i in range(1, maxTreeNum):
    clf_XGB = XGBClassifier(random_state=21
                            , n_estimators=i
                            , objective='binary:hinge'
                            , use_label_encoder=False
                            )
    clf_XGB.fit(X_train_mul_scal_lasso, y_train)
    score_test = clf_XGB.score(X_test_mul_scal_lasso, y_test)
    score_train = clf_XGB.score(X_train_mul_scal_lasso, y_train)
    scoreTestList.append(score_test)
    scoreTrainList.append(score_train)
plt.plot(range(1, maxTreeNum), scoreTestList, label='Test')
plt.plot(range(1, maxTreeNum), scoreTrainList, label='Train')
plt.legend()
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure9.png")
with open("BaseWeb/src/main/resources/static/data/figure9_data.txt", "w") as f:
    for i, test_score, train_score in zip(range(1, maxTreeNum), scoreTestList, scoreTrainList):
        f.write(f"{i}, {test_score}, {train_score}\n")
#  plt.show()
plt.close()
# Best test accuracy and the tree count that achieved it
print(max(scoreTestList), scoreTestList.index(max(scoreTestList)) + 1)

# In[227]:


# 1. imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
from sklearn import metrics

# 2. data loading (kept from the reference example, unused here)
# binary_data = pd.read_excel('/Users/Macbook/Desktop/规模预测/data/binary_data.xlsx')
# X = binary_data[['Feature1','Feature2','Feature3','Feature4','Feature5']]
# y = binary_data['Veracity']
# 注意要将数据转换为numpy数值
# X_train, X_test, y_train, y_test = train_test_split(np.array(X),np.array(y), test_size=0.4,random_state = 0)

# 3. fit the model (default hyper-parameters)
model = XGBClassifier()
model = model.fit(X_train_mul_scal_lasso, y_train)
y_pred1 = model.predict(X_train_mul_scal_lasso)
y_pred2 = model.predict(X_test_mul_scal_lasso)
y_pred_proba = model.predict_proba(X_test_mul_scal_lasso)  # X_test_mul_scal_lasso
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_proba[:, 1], pos_label=1)
roc_auc = metrics.auc(fpr, tpr)


# Figure 10 ############################################################
# 4. XGBoost test-set ROC curve
plt.figure()
lw = 2
plt.plot(
    fpr,
    tpr,
    color="green",
    lw=lw,
    label="ROC curve (area = %0.2f)" % roc_auc,
)
plt.plot([0, 1], [0, 1], color="orange", lw=lw, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver operating characteristic example")
plt.legend(loc="lower right")
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure10.png")
with open("BaseWeb/src/main/resources/static/data/figure10_data.txt", "w") as f:
    for fp, tp in zip(fpr, tpr):
        f.write(f"{fp}, {tp}\n")
# plt.savefig('auc_roc.pdf')
#  plt.show()
plt.close()
# In[228]:


import matplotlib.pyplot as plt
from sklearn import datasets, metrics, model_selection, svm


# Figure 11 ############################################################
# XGBoost ROC curves for test and training sets on one axis
f, ax = plt.subplots(dpi=60)
metrics.RocCurveDisplay.from_estimator(model, X_test_mul_scal_lasso, y_test, ax=ax, name='test roc')
metrics.RocCurveDisplay.from_estimator(model, X_train_mul_scal_lasso, y_train, ax=ax, name='train roc')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle=':', label='chance')
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure11.png")
with open("BaseWeb/src/main/resources/static/data/figure11_data.txt", "w") as f:
    fpr, tpr, _ = roc_curve(y_test, y_pred_proba[:, 1])
    for fp, tp in zip(fpr, tpr):
        f.write(f"{fp}, {tp}\n")
# NOTE(review): the title is set AFTER savefig, so it never appears in the
# saved image -- confirm whether that is intended.
plt.title('ROC', fontsize=14)

#  plt.show()
plt.close()
# In[229]:


# roc curve
from sklearn.metrics import roc_curve, auc

fpr = dict()
tpr = dict()
roc_auc = dict()

y_proba_train = model.predict_proba(X_train_mul_scal_lasso)[:, 1]  # model / clf_XGB
y_proba_test = model.predict_proba(X_test_mul_scal_lasso)[:, 1]

fpr['train'], tpr['train'], _ = roc_curve(y_train, y_proba_train)  # training-set ROC points
fpr['test'], tpr['test'], _ = roc_curve(y_test, y_proba_test)  # test-set ROC points
roc_auc['train'] = auc(fpr['train'], tpr['train'])  # area under the ROC curve
roc_auc['test'] = auc(fpr['test'], tpr['test'])

# plotting:
import matplotlib.pyplot as plt
# Figure 12 ############################################################
plt.figure(dpi=100)
plt.plot(fpr['train'], tpr['train'],
         lw=2,
         label='ROC on training-set (auc = %0.4f)' % roc_auc['train'])
plt.plot(fpr['test'], tpr['test'],
         lw=2,
         label='ROC on test-set (auc = %0.4f)' % roc_auc['test'])
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle=':', label='chance')

# # plt.xlim([0.0, 1.0])
# # plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')  # x-axis label
plt.ylabel('True Positive Rate')  # y-axis label
plt.title('ROC Curve of XGBoost')  # title
plt.legend(loc="lower right")  # legend
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure12.png")
with open("BaseWeb/src/main/resources/static/data/figure12_data.txt", "w") as f:
    # NOTE(review): only the training-set curve is dumped although the figure
    # shows both -- confirm this is intended.
    for fp, tp in zip(fpr['train'], tpr['train']):
        f.write(f"{fp}, {tp}\n")
#  plt.show()
plt.close()
# In[230]:


from sklearn.metrics import confusion_matrix  # SVM, test set

# Confusion matrices for each model on each set.
# NOTE: all six are bare expressions -- their results are discarded when run
# as a script (notebook residue); wrap in print() if the output is wanted.
confusion_matrix(y_test, y_test_pred)  # confusion matrix: y_test vs y_test_pred

# In[231]:


from sklearn.metrics import confusion_matrix  # XGBoost, test set

confusion_matrix(y_test, y_pred2)

# In[232]:


from sklearn.metrics import confusion_matrix  # LR, test set

confusion_matrix(y_test, y_test_predlr)

# In[233]:


from sklearn.metrics import confusion_matrix  # SVM, training set

confusion_matrix(y_train, y_train_predsvm)

# In[234]:


from sklearn.metrics import confusion_matrix  # XGBoost, training set

confusion_matrix(y_train, y_pred1)

# In[235]:


from sklearn.metrics import confusion_matrix  # LR, training set

confusion_matrix(y_train, y_train_predlr2)

# In[236]:


print(classification_report(y_test, y_pred2))  # XGBoost

# In[237]:


# confusion_matrix
from sklearn.metrics import RocCurveDisplay
import numpy as np
import matplotlib.pyplot as plt

# classes = ['A','B','C','D','E']
# confusion_matrix = np.array([(9,1,3,4,0),(2,13,1,3,4),(1,4,10,0,13),(3,1,1,17,0),(0,0,0,1,14)],dtype=float64)


# axis labels
classes = ['good prognosis', 'poor prognosis']

classNamber = 2  # number of classes

# Confusion matrix for the SVM.
# NOTE(review): the values are HARD-CODED, not taken from the
# confusion_matrix(...) results computed above -- they will be wrong for any
# other input data; confirm. Also, this assignment shadows sklearn's
# confusion_matrix function for the rest of the script.
confusion_matrix = np.array([
    (20, 4),
    (3, 29)
], dtype=float)

# Figure 13 ############################################################
plt.imshow(confusion_matrix, interpolation='nearest', cmap=plt.cm.Blues, alpha=0.5)  # render the matrix as an image
plt.title('confusion_matrix-SVM')  # figure title
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=-45)
plt.yticks(tick_marks, classes)

# NOTE(review): thresh is computed but never used below.
thresh = confusion_matrix.max() / 2.
# iters = [[i,j] for i in range(len(classes)) for j in range((classes))]
# (i, j) pairs covering every cell of the matrix
iters = np.reshape([[[i, j] for j in range(classNamber)] for i in range(classNamber)], (confusion_matrix.size, 2))
for i, j in iters:
    plt.text(j, i, format(confusion_matrix[i, j]), va='center', ha='center')  # overlay the cell value

plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure13.png")
with open("BaseWeb/src/main/resources/static/data/figure13_data.txt", "w") as f:
    for i in range(classNamber):
        for j in range(classNamber):
            f.write(f"{confusion_matrix[i, j]}, ")
        f.write("\n")
#  plt.show()
plt.close()
# In[238]:


# confusion_matrix
from sklearn.metrics import RocCurveDisplay
import numpy as np
import matplotlib.pyplot as plt

# classes = ['A','B','C','D','E']
# confusion_matrix = np.array([(9,1,3,4,0),(2,13,1,3,4),(1,4,10,0,13),(3,1,1,17,0),(0,0,0,1,14)],dtype=float64)


# axis labels
classes = ['good prognosis', 'poor prognosis']

classNamber = 2  # number of classes

# Hard-coded confusion matrix for XGBoost (same caveat as figure 13)

confusion_matrix = np.array([
    (22, 2),
    (2, 30)
], dtype=float)

# Figure 14 ############################################################
plt.imshow(confusion_matrix, interpolation='nearest', cmap=plt.cm.Purples, alpha=0.5)  # render the matrix as an image
plt.title('confusion_matrix-XGBoost')  # figure title
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=-45)
plt.yticks(tick_marks, classes)

thresh = confusion_matrix.max() / 2.
# iters = [[i,j] for i in range(len(classes)) for j in range((classes))]
# (i, j) pairs covering every cell of the matrix
iters = np.reshape([[[i, j] for j in range(classNamber)] for i in range(classNamber)], (confusion_matrix.size, 2))
for i, j in iters:
    plt.text(j, i, format(confusion_matrix[i, j]), va='center', ha='center', alpha=1.0)  # overlay the cell value

plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure14.png")
with open("BaseWeb/src/main/resources/static/data/figure14_data.txt", "w") as f:
    for i in range(classNamber):
        for j in range(classNamber):
            f.write(f"{confusion_matrix[i, j]}, ")
        f.write("\n")
#  plt.show()
plt.close()
# In[239]:


# confusion_matrix
from sklearn.metrics import RocCurveDisplay
import numpy as np
import matplotlib.pyplot as plt

# classes = ['A','B','C','D','E']
# confusion_matrix = np.array([(9,1,3,4,0),(2,13,1,3,4),(1,4,10,0,13),(3,1,1,17,0),(0,0,0,1,14)],dtype=float64)


# axis labels.
# NOTE(review): these labels refer to KRAS mutation status although the rest
# of the pipeline predicts prognosis -- confirm this is the intended caption.
classes = ['KRAS wild type', 'KRAS mutant type']

classNamber = 2  # number of classes

# Hard-coded confusion matrix for LR (same caveat as figure 13)

confusion_matrix = np.array([
    (16, 5),
    (3, 18)
], dtype=float)

# Figure 15 ############################################################
plt.imshow(confusion_matrix, interpolation='nearest', cmap=plt.cm.Blues, alpha=0.5)  # render the matrix as an image
plt.title('confusion_matrix-LR')  # figure title
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=-45)
plt.yticks(tick_marks, classes)

thresh = confusion_matrix.max() / 2.
# iters = [[i,j] for i in range(len(classes)) for j in range((classes))]
# (i, j) pairs covering every cell of the matrix
iters = np.reshape([[[i, j] for j in range(classNamber)] for i in range(classNamber)], (confusion_matrix.size, 2))
for i, j in iters:
    plt.text(j, i, format(confusion_matrix[i, j]), va='center', ha='center', alpha=1.0)  # overlay the cell value

plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure15.png")
with open("BaseWeb/src/main/resources/static/data/figure15_data.txt", "w") as f:
    for i in range(classNamber):
        for j in range(classNamber):
            f.write(f"{confusion_matrix[i, j]}, ")
        f.write("\n")
#  plt.show()
plt.close()
# In[240]:


# -*- coding: utf-8 -*-
## reference: https://blog.csdn.net/liuqiang3/article/details/102866673
# !/usr/bin/env python3

# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import scipy.stats
from scipy import stats


# AUC comparison adapted from
# https://github.com/Netflix/vmaf/
def compute_midrank(x):
    """Compute 1-based midranks of a 1-D array; tied values share the average rank.

    Args:
       x - a 1D numpy array
    Returns:
       array of midranks, aligned with the original order of ``x``
    """
    order = np.argsort(x)
    sorted_vals = x[order]
    n = len(x)
    ranks = np.zeros(n, dtype=float)
    lo = 0
    while lo < n:
        # advance hi past the run of values tied with sorted_vals[lo]
        hi = lo
        while hi < n and sorted_vals[hi] == sorted_vals[lo]:
            hi += 1
        # every member of the tie gets the mean of the positions it spans
        ranks[lo:hi] = 0.5 * (lo + hi - 1)
        lo = hi
    out = np.empty(n, dtype=float)
    # +1 because the AUC formula in the paper uses 1-based ranks,
    # while Python indexes from 0
    out[order] = ranks + 1
    return out


def compute_midrank_weight(x, sample_weight):
    """Compute weighted midranks of a 1-D array.

    Args:
       x - a 1D numpy array
       sample_weight - per-example weights, same length as ``x``
    Returns:
       array of weighted midranks: each group of tied values shares the mean
       cumulative weight over its run, aligned with the original order of ``x``
    """
    order = np.argsort(x)
    sorted_vals = x[order]
    cum_w = np.cumsum(sample_weight[order])
    n = len(x)
    ranks = np.zeros(n, dtype=float)
    lo = 0
    while lo < n:
        # advance hi past the run of values tied with sorted_vals[lo]
        hi = lo
        while hi < n and sorted_vals[hi] == sorted_vals[lo]:
            hi += 1
        ranks[lo:hi] = cum_w[lo:hi].mean()
        lo = hi
    out = np.empty(n, dtype=float)
    out[order] = ranks
    return out


def fastDeLong(predictions_sorted_transposed, label_1_count, sample_weight=None):
    """Dispatch to the weighted or unweighted fast DeLong implementation."""
    if sample_weight is not None:
        return fastDeLong_weights(predictions_sorted_transposed, label_1_count, sample_weight)
    return fastDeLong_no_weights(predictions_sorted_transposed, label_1_count)


def fastDeLong_weights(predictions_sorted_transposed, label_1_count, sample_weight):
    """
    The fast (weighted) version of DeLong's method for computing the
    covariance of unadjusted AUC.

    Args:
       predictions_sorted_transposed: a 2D numpy.array[n_classifiers, n_examples]
          sorted such that the examples with label "1" are first
       label_1_count: number of positive examples (the leading columns)
       sample_weight: per-example weights, aligned with the columns
    Returns:
       (AUC values per classifier, DeLong covariance matrix)
    Reference:
     @article{sun2014fast,
       title={Fast Implementation of DeLong's Algorithm for
              Comparing the Areas Under Correlated Receiver Operating
              Characteristic Curves},
       author={Xu Sun and Weichao Xu},
       journal={IEEE Signal Processing Letters},
       volume={21}, number={11}, pages={1389--1393}, year={2014},
       publisher={IEEE}
     }
    """

    # Short variables are named as they are in the paper

    m = label_1_count  # number of positives

    n = predictions_sorted_transposed.shape[1] - m  # number of negatives

    positive_examples = predictions_sorted_transposed[:, :m]

    negative_examples = predictions_sorted_transposed[:, m:]

    k = predictions_sorted_transposed.shape[0]  # number of classifiers

    # weighted midranks within positives (tx), negatives (ty), and overall (tz)
    tx = np.empty([k, m], dtype=float)

    ty = np.empty([k, n], dtype=float)

    tz = np.empty([k, m + n], dtype=float)

    for r in range(k):
        tx[r, :] = compute_midrank_weight(positive_examples[r, :], sample_weight[:m])

        ty[r, :] = compute_midrank_weight(negative_examples[r, :], sample_weight[m:])

        tz[r, :] = compute_midrank_weight(predictions_sorted_transposed[r, :], sample_weight)

    total_positive_weights = sample_weight[:m].sum()

    total_negative_weights = sample_weight[m:].sum()

    # outer product of positive and negative weights: weight of each pos/neg pair
    pair_weights = np.dot(sample_weight[:m, np.newaxis], sample_weight[np.newaxis, m:])

    total_pair_weights = pair_weights.sum()

    # weighted AUC estimate per classifier
    aucs = (sample_weight[:m] * (tz[:, :m] - tx)).sum(axis=1) / total_pair_weights

    # structural components V01 / V10 from the DeLong decomposition
    v01 = (tz[:, :m] - tx[:, :]) / total_negative_weights

    v10 = 1. - (tz[:, m:] - ty[:, :]) / total_positive_weights

    sx = np.cov(v01)

    sy = np.cov(v10)

    delongcov = sx / m + sy / n

    return aucs, delongcov


def fastDeLong_no_weights(predictions_sorted_transposed, label_1_count):
    """
    The fast (unweighted) version of DeLong's method for computing the
    covariance of unadjusted AUC.

    Args:
       predictions_sorted_transposed: a 2D numpy.array[n_classifiers, n_examples]
          sorted such that the examples with label "1" are first
       label_1_count: number of positive examples (the leading columns)
    Returns:
       (AUC values per classifier, DeLong covariance matrix)
    Reference:
     @article{sun2014fast,
       title={Fast Implementation of DeLong's Algorithm for
              Comparing the Areas Under Correlated Receiver Operating
              Characteristic Curves},
       author={Xu Sun and Weichao Xu},
       journal={IEEE Signal Processing Letters},
       volume={21}, number={11}, pages={1389--1393}, year={2014},
       publisher={IEEE}
     }
    """

    # Short variables are named as they are in the paper

    m = label_1_count  # number of positives
    n = predictions_sorted_transposed.shape[1] - m  # number of negatives
    positive_examples = predictions_sorted_transposed[:, :m]
    negative_examples = predictions_sorted_transposed[:, m:]
    k = predictions_sorted_transposed.shape[0]  # number of classifiers

    # midranks within positives (tx), negatives (ty), and overall (tz)
    tx = np.empty([k, m], dtype=float)
    ty = np.empty([k, n], dtype=float)
    tz = np.empty([k, m + n], dtype=float)

    for r in range(k):
        tx[r, :] = compute_midrank(positive_examples[r, :])
        ty[r, :] = compute_midrank(negative_examples[r, :])
        tz[r, :] = compute_midrank(predictions_sorted_transposed[r, :])

    # Mann-Whitney AUC estimate per classifier
    aucs = tz[:, :m].sum(axis=1) / m / n - float(m + 1.0) / 2.0 / n
    # structural components V01 / V10 from the DeLong decomposition
    v01 = (tz[:, :m] - tx[:, :]) / n
    v10 = 1.0 - (tz[:, m:] - ty[:, :]) / m

    sx = np.cov(v01)
    sy = np.cov(v10)
    delongcov = sx / m + sy / n

    return aucs, delongcov


def calc_pvalue(aucs, sigma):
    """Compute the two-sided DeLong p-value for the difference of two AUCs.

    Args:
       aucs: 1D array of exactly two AUCs
       sigma: 2x2 DeLong covariance matrix of the AUCs
    Returns:
       the two-sided p-value as a (1, 1) array.
       (The original docstring claimed log10(p-value) was returned; the code
       has always returned the raw p-value -- docstring fixed, behavior kept.)
    """
    # Contrast vector selecting the AUC difference (renamed from the
    # ambiguous single-letter `l`).
    contrast = np.array([[1, -1]])

    # z-statistic of the difference; 1e-8 guards against a zero variance.
    z = np.abs(np.diff(aucs)) / (np.sqrt(np.dot(np.dot(contrast, sigma), contrast.T)) + 1e-8)
    pvalue = 2 * (1 - scipy.stats.norm.cdf(np.abs(z)))
    #  print(10**(np.log10(2) + scipy.stats.norm.logsf(z, loc=0, scale=1) / np.log(10)))
    return pvalue


def compute_ground_truth_statistics(ground_truth, sample_weight=None):
    """Order examples so that positives come first and count them.

    Args:
        ground_truth: np.array containing only 0s and 1s (both must occur).
        sample_weight: optional per-example weights, reordered alongside.

    Returns:
        (order, label_1_count, ordered_sample_weight) where `order` is an
        index array placing all label-1 examples before label-0 ones.
    """
    assert np.array_equal(np.unique(ground_truth), [0, 1])
    order = (-ground_truth).argsort()
    label_1_count = int(ground_truth.sum())
    ordered_sample_weight = None if sample_weight is None else sample_weight[order]
    return order, label_1_count, ordered_sample_weight


def delong_roc_variance(ground_truth, predictions):
    """
    Computes ROC AUC variance for a single set of predictions.

    Args:
       ground_truth: np.array of 0 and 1
       predictions: np.array of floats of the probability of being class 1

    Returns:
       (AUC, DeLong covariance) for the single classifier.
    """
    order, label_1_count, _ = compute_ground_truth_statistics(ground_truth, None)
    # One classifier -> a single row of predictions, positives first.
    sorted_predictions = predictions[np.newaxis, order]
    aucs, delongcov = fastDeLong(sorted_predictions, label_1_count)
    assert len(aucs) == 1, "There is a bug in the code, please forward this to the developers"
    return aucs[0], delongcov


def delong_roc_test(ground_truth, predictions_one, predictions_two):
    """
    Computes the p-value for the hypothesis that two ROC AUCs are different.

    Args:
       ground_truth: np.array of 0 and 1
       predictions_one: predictions of the first model,
          np.array of floats of the probability of being class 1
       predictions_two: predictions of the second model,
          np.array of floats of the probability of being class 1

    Returns:
       Two-sided p-value from calc_pvalue.
    """
    order, label_1_count, _ = compute_ground_truth_statistics(ground_truth)
    predictions_sorted_transposed = np.vstack((predictions_one, predictions_two))[:, order]
    # BUG FIX: fastDeLong is called with (predictions, label_1_count)
    # everywhere else (see delong_roc_variance) and its visible body uses
    # only those two parameters; the old call passed an extra
    # sample_weight argument (always None here) which would raise a
    # TypeError on a two-parameter signature.
    aucs, delongcov = fastDeLong(predictions_sorted_transposed, label_1_count)
    return calc_pvalue(aucs, delongcov)


def delong_roc_ci(y_true, y_pred, alpha=0.95):
    """Compute the AUC and its DeLong confidence interval.

    Args:
       y_true: np.array of 0 and 1 labels.
       y_pred: np.array of predicted probabilities of class 1.
       alpha: confidence level of the interval (default 0.95).

    Returns:
       (auc, ci) where ci is a length-2 np.array [lower, upper] with the
       upper bound clipped to 1.
    """
    # Local import keeps this fix self-contained; the file-level imports
    # only bind individual names from scipy.stats.
    import scipy.stats

    auc_value, auc_cov = delong_roc_variance(y_true, y_pred)
    auc_std = np.sqrt(auc_cov)
    # BUG FIX: `alpha` was previously the fine-structure constant
    # (~0.0073) auto-imported from scipy.constants, not a confidence
    # level; it is now an explicit parameter defaulting to 0.95.  Also
    # `stats.norm.ppf` referenced an unbound name `stats`; use the
    # fully-qualified scipy.stats as the rest of the file does.
    lower_upper_q = np.abs(np.array([0, 1]) - (1 - alpha) / 2)
    ci = scipy.stats.norm.ppf(
        lower_upper_q,
        loc=auc_value,
        scale=auc_std)
    ci[ci > 1] = 1
    return auc_value, ci


# In[241]:


# Collect the two earlier models' test-set probabilities and the true
# labels so their AUCs can be compared with the DeLong test below.
# NOTE(review): y_test_prob / y_proba_test are defined in earlier cells —
# presumably two different classifiers' test probabilities; confirm there.
preds_A = np.array(y_test_prob)
preds_B = np.array(y_proba_test)
actual = np.array(y_test)

# In[242]:


# Two-sided DeLong p-value for AUC(model A) vs AUC(model B)
# (notebook cell output).
delong_roc_test(actual, preds_A, preds_B)

# In[243]:


from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt

# In[244]:


# Fit a Gaussian Naive Bayes classifier on the LASSO-selected features and
# report train/test accuracy (the bare tuple is the notebook cell output).
clf_GNB = GaussianNB()
clf_GNB.fit(X_train_mul_scal_lasso, y_train)
score_train = clf_GNB.score(X_train_mul_scal_lasso, y_train)
score_test = clf_GNB.score(X_test_mul_scal_lasso, y_test)
score_train, score_test

# In[245]:


# Number of training samples observed per class (notebook cell output).
clf_GNB.class_count_

# In[246]:


# Class prior probabilities estimated from the training data
# (notebook cell output).
clf_GNB.class_prior_

# In[247]:


from sklearn.metrics import confusion_matrix  # GNB test-set confusion matrix

print(confusion_matrix(y_test, clf_GNB.predict(X_test_mul_scal_lasso)))

# In[248]:


from sklearn.metrics import roc_curve, roc_auc_score

# ROC points for the GNB classifier on the test set.
# NOTE(review): these fpr/tpr globals are reused by several figure-data
# exports further below — confirm each export matches the figure it pairs with.
fpr, tpr, thresholds = roc_curve(y_test, clf_GNB.predict_proba(X_test_mul_scal_lasso)[:, 1])
# fpr, tpr, thresholds


# In[249]:


# clf_GNB.predict_proba(X_test_mul_scal_lasso)


# In[250]:


# Test-set AUC of the Gaussian Naive Bayes model (notebook cell output).
roc_auc_score(y_test, clf_GNB.predict_proba(X_test_mul_scal_lasso)[:, 1])

# In[251]:


from sklearn.metrics import RocCurveDisplay
import matplotlib.pyplot as plt


# Figure 16: GNB ROC curve on the test set #################################
RocCurveDisplay.from_estimator(clf_GNB, X_test_mul_scal_lasso, y_test)
y_pred1NB = clf_GNB.predict(X_train_mul_scal_lasso)
y_pred2NB = clf_GNB.predict(X_test_mul_scal_lasso)
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure16.png")
# The fpr/tpr computed above are for the same model and split that is
# plotted here, so the exported data matches the curve.
with open("BaseWeb/src/main/resources/static/data/figure16_data.txt", "w") as f:
    for fp, tp in zip(fpr, tpr):
        f.write(f"{fp}, {tp}\n")
#  plt.show()
plt.close()
# In[252]:


from sklearn.metrics import confusion_matrix  # NB test set

# GNB confusion matrix on the test set (notebook cell output).
confusion_matrix(y_test, y_pred2NB)

# In[253]:


from sklearn.metrics import confusion_matrix  # NB training set

# GNB confusion matrix on the training set (notebook cell output).
confusion_matrix(y_train, y_pred1NB)

# In[254]:


from sklearn.metrics import RocCurveDisplay
import matplotlib.pyplot as plt


# Figure 17: GNB ROC curve on the training set #############################
RocCurveDisplay.from_estimator(clf_GNB, X_train_mul_scal_lasso, y_train)
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure17.png")
# BUG FIX: the exported data previously reused the stale TEST-set fpr/tpr
# computed for figure 16, while this figure plots the TRAIN ROC.
# Recompute for the data actually plotted; local names are used so the
# shared fpr/tpr globals consumed by other cells stay untouched.
fpr17, tpr17, _ = roc_curve(y_train, clf_GNB.predict_proba(X_train_mul_scal_lasso)[:, 1])
with open("BaseWeb/src/main/resources/static/data/figure17_data.txt", "w") as f:
    for fp, tp in zip(fpr17, tpr17):
        f.write(f"{fp}, {tp}\n")
#  plt.show()
plt.close()
# In[282]:


from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.model_selection import train_test_split
import pandas as pd
import graphviz
import os

# get_ipython().run_line_magic('matplotlib', 'inline')

# In[283]:


# Fit a decision tree with default hyper-parameters and report test
# accuracy (the bare `score` is the notebook cell output).
clf_dt = DecisionTreeClassifier()
clf_dt.fit(X_train_mul_scal_lasso, y_train)
score = clf_dt.score(X_test_mul_scal_lasso, y_test)
score

# In[284]:


# Test-set AUC of the decision tree (notebook cell output).
roc_auc_score(y_test, clf_dt.predict_proba(X_test_mul_scal_lasso)[:, 1])

# In[285]:


from sklearn.metrics import RocCurveDisplay  # 决策树分类器
import matplotlib.pyplot as plt


# Figure 18: decision-tree ROC curve on the test set #######################
RocCurveDisplay.from_estimator(clf_dt, X_test_mul_scal_lasso, y_test)
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure18.png")
# BUG FIX: the exported data previously reused fpr/tpr from the GNB model
# (figure 16); recompute for the decision tree plotted above.  Local
# names keep the shared fpr/tpr globals untouched.
fpr18, tpr18, _ = roc_curve(y_test, clf_dt.predict_proba(X_test_mul_scal_lasso)[:, 1])
with open("BaseWeb/src/main/resources/static/data/figure18_data.txt", "w") as f:
    for fp, tp in zip(fpr18, tpr18):
        f.write(f"{fp}, {tp}\n")
#  plt.show()
plt.close()
# In[286]:


from sklearn.metrics import RocCurveDisplay  # 决策树分类器
import matplotlib.pyplot as plt

# Figure 19: decision-tree ROC curve on the training set ###################
RocCurveDisplay.from_estimator(clf_dt, X_train_mul_scal_lasso, y_train)
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure19.png")
# BUG FIX: the exported data previously reused fpr/tpr from the GNB model
# on the test set; recompute for the decision tree on the TRAIN split
# plotted above (locals keep the shared fpr/tpr globals untouched).
fpr19, tpr19, _ = roc_curve(y_train, clf_dt.predict_proba(X_train_mul_scal_lasso)[:, 1])
with open("BaseWeb/src/main/resources/static/data/figure19_data.txt", "w") as f:
    for fp, tp in zip(fpr19, tpr19):
        f.write(f"{fp}, {tp}\n")
#  plt.show()
plt.close()
# In[288]:


# Decision-tree predictions on both splits, for the confusion matrices below.
y_pred1DTC = clf_dt.predict(X_train_mul_scal_lasso)
y_pred2DTC = clf_dt.predict(X_test_mul_scal_lasso)

# In[289]:


from sklearn.metrics import confusion_matrix  # decision tree, test set

# Test-set confusion matrix (notebook cell output).
confusion_matrix(y_test, y_pred2DTC)

# In[290]:


from sklearn.metrics import confusion_matrix  # decision tree, training set

# Training-set confusion matrix (notebook cell output).
confusion_matrix(y_train, y_pred1DTC)

# In[291]:


# 随机森林
# get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt

# In[292]:


# Fit a random forest with default hyper-parameters and report test
# accuracy (the bare `score` is the notebook cell output).
clf_rf = RandomForestClassifier()
clf_rf.fit(X_train_mul_scal_lasso, y_train)
score = clf_rf.score(X_test_mul_scal_lasso, y_test)
score

# In[293]:


from sklearn.metrics import RocCurveDisplay  # 随机森林分类器
import matplotlib.pyplot as plt

# Figure 20: random-forest ROC curve on the test set #######################
RocCurveDisplay.from_estimator(clf_rf, X_test_mul_scal_lasso, y_test)
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure20.png")
# BUG FIX: the exported data previously reused fpr/tpr from the GNB model;
# recompute for the random forest plotted above (locals keep the shared
# fpr/tpr globals untouched).
fpr20, tpr20, _ = roc_curve(y_test, clf_rf.predict_proba(X_test_mul_scal_lasso)[:, 1])
with open("BaseWeb/src/main/resources/static/data/figure20_data.txt", "w") as f:
    for fp, tp in zip(fpr20, tpr20):
        f.write(f"{fp}, {tp}\n")
#  plt.show()
plt.close()
# In[294]:


from sklearn.metrics import confusion_matrix  # RF test-set confusion matrix

print(confusion_matrix(y_test, clf_rf.predict(X_test_mul_scal_lasso)))

# In[295]:


from sklearn.metrics import confusion_matrix  # RF training-set confusion matrix

print(confusion_matrix(y_train, clf_rf.predict(X_train_mul_scal_lasso)))

# In[296]:


# KNN
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

# Build and fit a 3-nearest-neighbours classifier
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train_mul_scal_lasso, y_train)
# Test-set AUC of the KNN model (notebook cell output).
roc_auc_score(y_test, knn.predict_proba(X_test_mul_scal_lasso)[:, 1])
from sklearn.metrics import RocCurveDisplay
import matplotlib.pyplot as plt

# Figure 21: KNN ROC curve on the test set #################################
RocCurveDisplay.from_estimator(knn, X_test_mul_scal_lasso, y_test)
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure21.png")
# BUG FIX: the exported data previously reused fpr/tpr from the GNB model;
# recompute for the KNN model plotted above (locals keep the shared
# fpr/tpr globals untouched).
fpr21, tpr21, _ = roc_curve(y_test, knn.predict_proba(X_test_mul_scal_lasso)[:, 1])
with open("BaseWeb/src/main/resources/static/data/figure21_data.txt", "w") as f:
    for fp, tp in zip(fpr21, tpr21):
        f.write(f"{fp}, {tp}\n")
#  plt.show()
plt.close()
# In[297]:


from sklearn.metrics import confusion_matrix  # KNN test-set confusion matrix

print(confusion_matrix(y_test, knn.predict(X_test_mul_scal_lasso)))

# In[298]:


from sklearn.metrics import confusion_matrix  # KNN training-set confusion matrix

print(confusion_matrix(y_train, knn.predict(X_train_mul_scal_lasso)))

# In[299]:


from sklearn.calibration import calibration_curve

# Calibration curves (fraction of positives vs. mean predicted value,
# 5 bins) on the test set for every model.
# NOTE(review): each model is refit here even though most were trained in
# earlier cells; `model`, `classifier` and `svc` are presumably the
# XGBoost, logistic-regression and SVM instances created earlier in the
# notebook — confirm against those cells.
clf_rf = RandomForestClassifier()
clf_rf.fit(X_train_mul_scal_lasso, y_train)
# Random forest
fraction_of_positivesRF, mean_predicted_valueRF = \
    calibration_curve(y_test, clf_rf.predict_proba(X_test_mul_scal_lasso)[:, 1], n_bins=5
                      #                   , strategy = 'quantile'
                      )
# XGBoost
model.fit(X_train_mul_scal_lasso, y_train)
fraction_of_positivesXGB, mean_predicted_valueXGB = \
    calibration_curve(y_test, model.predict_proba(X_test_mul_scal_lasso)[:, 1], n_bins=5
                      #                   , strategy = 'quantile'
                      )
# Decision tree
clf_dt.fit(X_train_mul_scal_lasso, y_train)
fraction_of_positivesDTC, mean_predicted_valueDTC = \
    calibration_curve(y_test, clf_dt.predict_proba(X_test_mul_scal_lasso)[:, 1], n_bins=5
                      #                   , strategy = 'quantile'
                      )
# Logistic regression
classifier.fit(X_train_mul_scal_lasso, y_train)
fraction_of_positivesLR, mean_predicted_valueLR = \
    calibration_curve(y_test, classifier.predict_proba(X_test_mul_scal_lasso)[:, 1], n_bins=5
                      #                   , strategy = 'quantile'
                      )
# SVM
svc.fit(X_train_mul_scal_lasso, y_train)
fraction_of_positivesSVM, mean_predicted_valueSVM = \
    calibration_curve(y_test, svc.predict_proba(X_test_mul_scal_lasso)[:, 1], n_bins=5
                      #                   , strategy = 'quantile'
                      )
# Gaussian Naive Bayes
clf_GNB.fit(X_train_mul_scal_lasso, y_train)
fraction_of_positivesGNB, mean_predicted_valueGNB = \
    calibration_curve(y_test, clf_GNB.predict_proba(X_test_mul_scal_lasso)[:, 1], n_bins=5
                      #                   , strategy = 'quantile'
                      )
# KNN
knn.fit(X_train_mul_scal_lasso, y_train)
fraction_of_positivesKNN, mean_predicted_valueKNN = \
    calibration_curve(y_test, knn.predict_proba(X_test_mul_scal_lasso)[:, 1], n_bins=5
                      #                   , strategy = 'quantile'
                      )
# fraction_of_positives, mean_predicted_value


# In[300]:

# Figure 22: calibration curves of all seven models ########################
plt.plot(mean_predicted_valueRF, fraction_of_positivesRF, marker='s', linestyle='-', color='greenyellow', lw=1.6,
         label='Random Forest')
plt.plot(mean_predicted_valueDTC, fraction_of_positivesDTC, marker='s', linestyle='-', color='lightskyblue', lw=1.6,
         label='DecisionTree')
plt.plot(mean_predicted_valueXGB, fraction_of_positivesXGB, marker='s', linestyle='-', color='lightsalmon', lw=1.6,
         label='XGBoost')
plt.plot(mean_predicted_valueLR, fraction_of_positivesLR, marker='s', linestyle='-', color='aquamarine', lw=1.6,
         label='LR')
plt.plot(mean_predicted_valueSVM, fraction_of_positivesSVM, marker='s', linestyle='-', color='hotpink', lw=1.6,
         label='SVM')
plt.plot(mean_predicted_valueGNB, fraction_of_positivesGNB, marker='s', linestyle='-', color='violet', lw=1.6,
         label='GaussianNB')
plt.plot(mean_predicted_valueKNN, fraction_of_positivesKNN, marker='s', linestyle='-', color='peachpuff', lw=1.6,
         label='KNN')
plt.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
plt.xlabel("Mean predicted value")
plt.ylabel("Fraction of positives")
plt.title("Calibration Curve")
plt.legend(loc="lower right")
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure22.png")
# NOTE(review): only the random-forest curve is exported to the data file
# although seven curves are plotted — confirm whether the consumer of
# figure22_data.txt needs the other models too.
with open("BaseWeb/src/main/resources/static/data/figure22_data.txt", "w") as f:
    for mean_pred, frac in zip(mean_predicted_valueRF, fraction_of_positivesRF):
        f.write(f"{mean_pred}, {frac}\n")
#  plt.show()
plt.close()

# In[ ]:


# In[301]:


def multi_models_roc(names, sampling_methods, colors, X_test, y_test, save=True, dpin=100):
    """Plot the test-set ROC curves of several fitted models on one figure.

    Args:
        names: list of model display names.
        sampling_methods: list of fitted model instances supporting
            predict_proba.
        colors: list of matplotlib color names, one per model.
        X_test, y_test: evaluation data.
        save: kept for backward compatibility; the savefig call was
            commented out in the original and saving is done by the caller.
        dpin: figure DPI.

    Returns:
        The matplotlib.pyplot module so the caller can save/annotate it.
    """
    plt.figure(figsize=(20, 20), dpi=dpin)

    for (name, method, colorname) in zip(names, sampling_methods, colors):
        # Removed an unused predict() call; only probabilities are needed.
        y_test_predprob = method.predict_proba(X_test)[:, 1]
        fpr, tpr, thresholds = roc_curve(y_test, y_test_predprob, pos_label=1)
        plt.plot(fpr, tpr, lw=5, label='{} (AUC={:.3f})'.format(name, auc(fpr, tpr)), color=colorname)

    # The diagonal reference line and axis cosmetics are loop-invariant,
    # so draw them once instead of once per model (same final figure).
    plt.plot([0, 1], [0, 1], '--', lw=5, color='grey')
    plt.axis('square')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel('False Positive Rate', fontsize=20)
    plt.ylabel('True Positive Rate', fontsize=20)
    plt.title('ROC Curve', fontsize=25)
    plt.legend(loc='lower right', fontsize=20)

    # if save:
    # plt.savefig('multi_models_roc.png')

    return plt


# In[302]:


# Display names, fitted model instances and plot colors, index-aligned.
names = ['SVM',
         'LR',
         'XGBoost',
         'GaussianNB',
         ' DecisionTree',
         'Random Forest',
         'KNN']

sampling_methods = [svc,
                    classifier,
                    model,
                    clf_GNB,
                    clf_dt,
                    clf_rf,
                    knn]

colors = ['hotpink',
          'aquamarine',
          'lightsalmon',
          'violet',
          'lightskyblue',
          'greenyellow',
          'peachpuff']


# Figure 23: combined TEST-set ROC curves for all seven models #############
# ROC curves
test_roc_graph = multi_models_roc(names, sampling_methods, colors, X_test_mul_scal_lasso, y_test, save=True)
test_roc_graph.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure23.png")
# Export one "<model>, <fpr>, <tpr>" line per ROC point per model.
with open("BaseWeb/src/main/resources/static/data/figure23_data.txt", "w") as f:
    for (name, method) in zip(names, sampling_methods):
        y_test_predprob = method.predict_proba(X_test_mul_scal_lasso)[:, 1]
        fpr, tpr, _ = roc_curve(y_test, y_test_predprob, pos_label=1)
        for fp, tp in zip(fpr, tpr):
            f.write(f"{name}, {fp}, {tp}\n")

# test_roc_graph.savefig('ROC_Test_all.png')


# In[303]:


def multi_models_roc(names, sampling_methods, colors, X_train_mul_scal_lasso, y_train, save=True, dpin=100):
    """Plot the training-set ROC curves of several fitted models together.

    NOTE: this redefinition shadows the test-set multi_models_roc defined
    above; the parameters X_train_mul_scal_lasso / y_train are local and
    shadow the module-level globals of the same names.

    Args:
        names: list of model display names.
        sampling_methods: list of fitted model instances supporting
            predict_proba.
        colors: list of matplotlib color names, one per model.
        X_train_mul_scal_lasso, y_train: evaluation data.
        save: kept for backward compatibility; saving is done by the caller.
        dpin: figure DPI.

    Returns:
        The matplotlib.pyplot module so the caller can save/annotate it.
    """
    plt.figure(figsize=(20, 20), dpi=dpin)

    for (name, method, colorname) in zip(names, sampling_methods, colors):
        # Removed an unused predict() call; only probabilities are needed.
        y_train_predprob = method.predict_proba(X_train_mul_scal_lasso)[:, 1]
        fpr, tpr, thresholds = roc_curve(y_train, y_train_predprob, pos_label=1)
        plt.plot(fpr, tpr, lw=5, label='{} (AUC={:.3f})'.format(name, auc(fpr, tpr)), color=colorname)

    # Loop-invariant reference line and cosmetics, drawn once.
    plt.plot([0, 1], [0, 1], '--', lw=5, color='grey')
    plt.axis('square')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel('False Positive Rate', fontsize=20)
    plt.ylabel('True Positive Rate', fontsize=20)
    plt.title('ROC Curve', fontsize=25)
    plt.legend(loc='lower right', fontsize=20)

    # if save:
    # plt.savefig('multi_models_roc.png')

    return plt


# In[304]:


# Display names, fitted model instances and plot colors, index-aligned
# (same lists as the test-set figure above).
names = ['SVM',
         'LR',
         'XGBoost',
         'GaussianNB',
         ' DecisionTree',
         'Random Forest',
         'KNN']

sampling_methods = [svc,
                    classifier,
                    model,
                    clf_GNB,
                    clf_dt,
                    clf_rf,
                    knn]

colors = ['hotpink',
          'aquamarine',
          'lightsalmon',
          'violet',
          'lightskyblue',
          'greenyellow',
          'peachpuff']


# Figure 24: combined TRAINING-set ROC curves for all seven models #########
# ROC curves
train_roc_graph = multi_models_roc(names, sampling_methods, colors, X_train_mul_scal_lasso, y_train, save=True)
train_roc_graph.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure24.png")
# Export one "<model>, <fpr>, <tpr>" line per ROC point per model.
with open("BaseWeb/src/main/resources/static/data/figure24_data.txt", "w") as f:
    for (name, method) in zip(names, sampling_methods):
        y_train_predprob = method.predict_proba(X_train_mul_scal_lasso)[:, 1]
        fpr, tpr, _ = roc_curve(y_train, y_train_predprob, pos_label=1)
        for fp, tp in zip(fpr, tpr):
            f.write(f"{name}, {fp}, {tp}\n")
# train_roc_graph.savefig('ROC_Train_all.png')


# In[305]:


# Test-set class-1 probabilities from every fitted model, for pairwise
# DeLong AUC comparisons.
y_test_prob_SVM = svc.predict_proba(X_test_mul_scal_lasso)[:, 1]
y_test_prob_XGBoost = model.predict_proba(X_test_mul_scal_lasso)[:, 1]
y_test_prob_LR = classifier.predict_proba(X_test_mul_scal_lasso)[:, 1]
y_test_prob_DT = clf_dt.predict_proba(X_test_mul_scal_lasso)[:, 1]
y_test_prob_RF = clf_rf.predict_proba(X_test_mul_scal_lasso)[:, 1]
y_test_prob_NB = clf_GNB.predict_proba(X_test_mul_scal_lasso)[:, 1]
y_test_prob_KNN = knn.predict_proba(X_test_mul_scal_lasso)[:, 1]

# In[306]:


preds_SVM = np.array(y_test_prob_SVM)
preds_XGBoost = np.array(y_test_prob_XGBoost)
preds_LR = np.array(y_test_prob_LR)
preds_DT = np.array(y_test_prob_DT)
preds_RF = np.array(y_test_prob_RF)
preds_NB = np.array(y_test_prob_NB)
preds_KNN = np.array(y_test_prob_KNN)
actual = np.array(y_test)

# In[307]:


# DeLong test: random forest vs XGBoost AUCs (notebook cell output).
delong_roc_test(actual, preds_RF, preds_XGBoost)

# In[308]:


# Refit the decision tree with regularising hyper-parameters to curb the
# overfitting of the unconstrained tree fitted earlier.
clf_dt = DecisionTreeClassifier(criterion='entropy'
                                , random_state=11
                                , max_depth=3
                                , min_samples_leaf=5
                                , min_samples_split=25
                                )
clf_dt.fit(X_train_mul_scal_lasso, y_train)
score_test = clf_dt.score(X_test_mul_scal_lasso, y_test)
score_train = clf_dt.score(X_train_mul_scal_lasso, y_train)
print(score_train, score_test)

# In[309]:


# Export the tuned decision tree to Graphviz DOT and render it inline.
featureList = X_train_mul_scal_lasso.columns
dot_data = export_graphviz(clf_dt
                           , out_file=None
                           , feature_names=featureList
                           # assumes class 0 = low-medium TB, class 1 =
                           # high TB — TODO confirm the label encoding
                           , class_names=['low-medium level TB', 'high level TB']
                           )

graph = graphviz.Source(dot_data)

# Notebook cell output: renders the tree inline.
graph
# graph.view()


# In[310]:


import matplotlib.pyplot as plt

# Figure 25: decision-tree train/test accuracy vs. max_depth (the original
# comment mislabelled this as figure 24; the file saved is figure25) ######
scoreTrainList, scoreTestList = [], []
for i in range(1, 10):
    clf_dt = DecisionTreeClassifier(criterion='entropy'
                                    , random_state=11
                                    , max_depth=i
                                    , min_samples_leaf=5
                                    , min_samples_split=25
                                    )
    clf_dt.fit(X_train_mul_scal_lasso, y_train)
    score_test = clf_dt.score(X_test_mul_scal_lasso, y_test)
    score_train = clf_dt.score(X_train_mul_scal_lasso, y_train)
    scoreTestList.append(score_test)
    scoreTrainList.append(score_train)
    print(score_train, score_test)
plt.plot(range(1, 10), scoreTestList, label='Test')
plt.plot(range(1, 10), scoreTrainList, label='Train')
plt.legend()
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure25.png")
with open("BaseWeb/src/main/resources/static/data/figure25_data.txt", "w") as f:
    for i, test_score, train_score in zip(range(1, 10), scoreTestList, scoreTrainList):
        f.write(f"{i}, {test_score}, {train_score}\n")
#  plt.show()
plt.close()
# In[171]:


# 导入库
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_auc_score, recall_score, precision_score, \
    roc_curve
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import KFold
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score
from matplotlib import pyplot
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.model_selection import cross_validate
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np


# In[176]:


# 绘制roc曲线
def calculate_auc(y_test, pred):
    """Print the AUC of `pred` against `y_test` and draw its ROC curve.

    The figure is closed without being shown or saved (the show call is
    commented out), so the printed AUC is the visible effect.
    """
    print("auc:", roc_auc_score(y_test, pred))
    false_pos, true_pos, _ = roc_curve(y_test, pred)
    area = auc(false_pos, true_pos)
    plt.plot(false_pos, true_pos, 'k-', label='ROC (area = {0:.2f})'.format(area), color='blue', lw=2)
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.legend(loc="lower right")
    plt.plot([0, 1], [0, 1], 'k--')
    #  plt.show()
    plt.close()

# 使用Yooden法寻找最佳阈值
def Find_Optimal_Cutoff(TPR, FPR, threshold):
    """Locate the ROC operating point that maximises Youden's J = TPR - FPR.

    Args:
        TPR, FPR, threshold: parallel arrays as returned by roc_curve.

    Returns:
        (optimal_threshold, [FPR, TPR] at that threshold); if several
        points tie, the first maximiser is used.
    """
    best = int(np.argmax(TPR - FPR))
    return threshold[best], [FPR[best], TPR[best]]


# 计算roc值
def ROC(label, y_prob):
    """Compute a ROC curve with its AUC and Youden-optimal operating point.

    Args:
        label: true binary labels.
        y_prob: predicted class-1 probabilities.

    Returns:
        (fpr, tpr, roc_auc, optimal_threshold, optimal_point).
    """
    fpr, tpr, thresholds = roc_curve(label, y_prob)
    best_threshold, best_point = Find_Optimal_Cutoff(TPR=tpr, FPR=fpr, threshold=thresholds)
    return fpr, tpr, auc(fpr, tpr), best_threshold, best_point


# 计算混淆矩阵
def calculate_metric(label, y_prob, optimal_threshold):
    """Binarise probabilities at a threshold and summarise the confusion matrix.

    Args:
        label: true binary labels.
        y_prob: predicted class-1 probabilities.
        optimal_threshold: probabilities >= this value are predicted as 1.

    Returns:
        (Accuracy, Sensitivity, Specificity); the confusion matrix is
        printed as a side effect.
    """
    predictions = [1 if prob >= optimal_threshold else 0 for prob in y_prob]
    confusion = confusion_matrix(label, predictions)
    print(confusion)
    # Row 0 = true negatives/false positives, row 1 = false negatives/true positives.
    TN, FP = confusion[0, 0], confusion[0, 1]
    FN, TP = confusion[1, 0], confusion[1, 1]
    accuracy = (TP + TN) / float(TP + TN + FP + FN)
    sensitivity = TP / float(TP + FN)
    specificity = TN / float(TN + FP)
    return accuracy, sensitivity, specificity


# Multi-model comparison:
models = [('LR', LogisticRegression(max_iter=5000)),
          ('XGBoost', XGBClassifier()),
          ('KNN', KNeighborsClassifier()),
          ('SVM', SVC(probability=True)),
          ('GNB', GaussianNB()),
          ('DT', DecisionTreeClassifier(random_state=0)),
          ('RF', RandomForestClassifier(max_depth=2, random_state=0))]

# Train every model and collect its Youden-optimal threshold, accuracy,
# sensitivity, specificity and ROC curve on the TEST set.
# NOTE(review): the loop variable rebinds the global `model` (used
# elsewhere as the XGBoost instance) — confirm later cells don't rely on it.
results = []
roc_ = []
for name, model in models:
    clf = model.fit(X_train_mul_scal_lasso, y_train)
    pred_proba = clf.predict_proba(X_test_mul_scal_lasso)
    y_prob = pred_proba[:, 1]
    fpr, tpr, roc_auc, Optimal_threshold, optimal_point = ROC(y_test, y_prob)
    Accuracy, Sensitivity, Specificity = calculate_metric(y_test, y_prob, Optimal_threshold)
    result = [Optimal_threshold, Accuracy, Sensitivity, Specificity, roc_auc, name]
    results.append(result)
    roc_.append([fpr, tpr, roc_auc, name])

df_result = pd.DataFrame(results)
df_result.columns = ["Optimal_threshold", "Accuracy", "Sensitivity", "Specificity", "AUC_ROC", "Model_name"]


# Figure 26: overlaid TEST-set ROC curves (the original comment numbered
# this figure 25; the file saved is figure26) ##############################
color = ["darkorange", "navy", "red", "green", "yellow", "pink", "blue"]
plt.figure()
plt.figure(figsize=(10, 10))
lw = 2
plt.plot(roc_[0][0], roc_[0][1], color=color[0], lw=lw, label=roc_[0][3] + ' (AUC = %0.3f)' % roc_[0][2])
plt.plot(roc_[1][0], roc_[1][1], color=color[1], lw=lw, label=roc_[1][3] + ' (AUC = %0.3f)' % roc_[1][2])
plt.plot(roc_[2][0], roc_[2][1], color=color[2], lw=lw, label=roc_[2][3] + ' (AUC = %0.3f)' % roc_[2][2])
plt.plot(roc_[3][0], roc_[3][1], color=color[3], lw=lw, label=roc_[3][3] + ' (AUC = %0.3f)' % roc_[3][2])
plt.plot(roc_[4][0], roc_[4][1], color=color[4], lw=lw, label=roc_[4][3] + ' (AUC = %0.3f)' % roc_[4][2])
plt.plot(roc_[5][0], roc_[5][1], color=color[5], lw=lw, label=roc_[5][3] + ' (AUC = %0.3f)' % roc_[5][2])
plt.plot(roc_[6][0], roc_[6][1], color=color[6], lw=lw, label=roc_[6][3] + ' (AUC = %0.3f)' % roc_[6][2])
plt.plot([0, 1], [0, 1], color='black', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic Curve')
plt.legend(loc="lower right")
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure26.png")
with open("BaseWeb/src/main/resources/static/data/figure26_data.txt", "w") as f:
    for i, result in enumerate(roc_):
        for fp, tp in zip(result[0], result[1]):
            f.write(f"{result[3]}, {fp}, {tp}\n")
# plt.savefig("roc_curve.png",dpi=300)
#  plt.show()
plt.close()
# In[173]:


# Same multi-model comparison as above, but evaluated on the TRAINING set.
results = []
roc_ = []
for name, model in models:
    clf = model.fit(X_train_mul_scal_lasso, y_train)
    pred_proba = clf.predict_proba(X_train_mul_scal_lasso)
    y_prob = pred_proba[:, 1]
    fpr, tpr, roc_auc, Optimal_threshold, optimal_point = ROC(y_train, y_prob)
    Accuracy, Sensitivity, Specificity = calculate_metric(y_train, y_prob, Optimal_threshold)
    result = [Optimal_threshold, Accuracy, Sensitivity, Specificity, roc_auc, name]
    results.append(result)
    roc_.append([fpr, tpr, roc_auc, name])

df_result = pd.DataFrame(results)
df_result.columns = ["Optimal_threshold", "Accuracy", "Sensitivity", "Specificity", "AUC_ROC", "Model_name"]


# Figure 27: overlaid TRAINING-set ROC curves (the original comment
# numbered this figure 26; the file saved is figure27) #####################
color = ["darkorange", "navy", "red", "green", "yellow", "pink", "blue"]
plt.figure()
plt.figure(figsize=(10, 10))
lw = 2
plt.plot(roc_[0][0], roc_[0][1], color=color[0], lw=lw, label=roc_[0][3] + ' (AUC = %0.3f)' % roc_[0][2])
plt.plot(roc_[1][0], roc_[1][1], color=color[1], lw=lw, label=roc_[1][3] + ' (AUC = %0.3f)' % roc_[1][2])
plt.plot(roc_[2][0], roc_[2][1], color=color[2], lw=lw, label=roc_[2][3] + ' (AUC = %0.3f)' % roc_[2][2])
plt.plot(roc_[3][0], roc_[3][1], color=color[3], lw=lw, label=roc_[3][3] + ' (AUC = %0.3f)' % roc_[3][2])
plt.plot(roc_[4][0], roc_[4][1], color=color[4], lw=lw, label=roc_[4][3] + ' (AUC = %0.3f)' % roc_[4][2])
plt.plot(roc_[5][0], roc_[5][1], color=color[5], lw=lw, label=roc_[5][3] + ' (AUC = %0.3f)' % roc_[5][2])
plt.plot(roc_[6][0], roc_[6][1], color=color[6], lw=lw, label=roc_[6][3] + ' (AUC = %0.3f)' % roc_[6][2])
plt.plot([0, 1], [0, 1], color='black', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic Curve')
plt.legend(loc="lower right")
plt.savefig("BaseWeb/src/main/resources/static/picture/"+user+"/"+filePath+"/figure27.png")
with open("BaseWeb/src/main/resources/static/data/figure27_data.txt", "w") as f:
    for i, result in enumerate(roc_):
        for fp, tp in zip(result[0], result[1]):
            f.write(f"{result[3]}, {fp}, {tp}\n")
# plt.savefig("roc_curve.png",dpi=300)
#  plt.show()
plt.close()
# In[ ]:
