# -*- coding: UTF-8 -*-
import time
import warnings
import numpy as np
import pandas as pd
from sklearn.svm import SVC
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold

# Silence sklearn/matplotlib warnings so the console report stays readable.
warnings.filterwarnings('ignore')

# Wall-clock start for the elapsed-time line printed at the end of the run.
start = time.time()

# Load the training and test splits from disk.
train = pd.read_csv('./data/datasets/train.csv')
test = pd.read_csv('./data/datasets/test.csv')

# Feature matrix: everything except the label and the row identifier.
predictors = train.drop(columns=['stroke', 'id'])
target = train['stroke']

# Stratified 10-fold CV keeps the stroke/no-stroke ratio in every fold.
kfold = StratifiedKFold(n_splits=10)

# Candidate models (fixed hyper-parameters) to be compared via cross-validation.
classifiers = [
    SVC(gamma='auto'),
    DecisionTreeClassifier(),
    RandomForestClassifier(n_estimators=100),
    GaussianNB(),
    KNeighborsClassifier(),
    LogisticRegression(solver='liblinear'),
    LinearDiscriminantAnalysis(),
]

# 10-fold cross-validated accuracy for every candidate (one score array each).
cv_results = [
    cross_val_score(clf, predictors, target, scoring='accuracy', cv=kfold, n_jobs=-1)
    for clf in classifiers
]

# Summarize each model's CV accuracy (mean and spread) in one table.
cv_means = [cv_result.mean() for cv_result in cv_results]
cv_std = [cv_result.std() for cv_result in cv_results]

cvResDf = pd.DataFrame({'cv_mean': cv_means, 'cv_std': cv_std,
                        'algorithm': ['SVC', 'DecisionTreeCla', 'RandomForestCla', 'GaussianNBCla', 'KNN', 'LR',
                                      'LinearDiscrimiAna']})
print(cvResDf)

# Box plot of the per-fold accuracies. boxplot draws the boxes at x = 1..n,
# so label those positions directly instead of the original hack of padding
# position 0 with an empty label (and rotation=360, which is a no-op).
plt.boxplot(cv_results)
plt.xticks(np.arange(1, len(cv_results) + 1), ['SVC', 'DTC', 'RFC', 'GNBC', 'KNN', 'LR', 'LDA'])
plt.title('Stroke Model')
plt.savefig('./data/img/Stroke Model.jpg')
plt.show()

# KNN performance tuning: grid-search n_neighbors over odd values 1..21
# (10-fold stratified CV, accuracy scoring).
# Removed the dead `modelgsKNN = KNeighborsClassifier()` assignment that was
# immediately overwritten by the GridSearchCV below.
knn_param_grid = {'n_neighbors': [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21]}
modelgsKNN = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=knn_param_grid, cv=kfold, scoring="accuracy",
                          n_jobs=-1, verbose=1)
modelgsKNN.fit(predictors, target)

# Best mean CV accuracy found by the grid search.
print('modelgsKNN模型得分为：%.3f' % modelgsKNN.best_score_)

# NOTE(review): these predictions are made on the TRAINING features, so the
# confusion matrix below reflects in-sample performance, not held-out test
# performance — confirm this is intentional.
modelgsKNNtestpre_y = modelgsKNN.predict(predictors).astype(int)
print('KNN模型混淆矩阵为\n', confusion_matrix(target.astype(int).astype(str), modelgsKNNtestpre_y.astype(str)))

# Classification results on the test split.
ids = test['id']
# The 'predict' column only exists in test.csv after a previous run has
# written it back (see below); errors='ignore' keeps the very first run from
# crashing on the drop.
predictions = modelgsKNN.predict(test.drop(['stroke', 'id', 'predict'], axis=1, errors='ignore'))

# Save the (id, predict) pairs to submission_KNN.csv.
output = pd.DataFrame({'id': ids, 'predict': predictions})
output.to_csv('./data/submission_KNN.csv', index=False)

# Write the predictions back into test.csv as a 'predict' column.
data = pd.read_csv(r'./data/datasets/test.csv')
data1 = pd.read_csv(r'./data/submission_KNN.csv')
data['predict'] = data1['predict']
data.to_csv(r'./data/datasets/test.csv', index=False, sep=',')

'''ROC curve and AUC for the tuned KNN model.'''
# NOTE(review): fpr/tpr are computed from TRAINING-set predictions
# (predictors/target), so this is an optimistic, in-sample ROC — confirm.
modelgsKNNtestpre_y = modelgsKNN.predict(predictors).astype(int)
# roc_curve expects a score per sample; hard 0/1 predictions yield a ROC with
# a single operating point (two thresholds).
fpr, tpr, threshold = roc_curve(target, modelgsKNNtestpre_y)
roc_auc = auc(fpr, tpr)
# BUG FIX: the original print labeled this value "准确率" (accuracy); it is
# the area under the ROC curve.
print("AUC: \t", roc_auc)
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK axis labels
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
lw = 2
# A single figure is enough — the original's bare plt.figure() opened an
# extra empty window before the sized one.
plt.figure(figsize=(10, 10))
plt.plot(fpr, tpr, color='r', lw=lw, label='ROC curve(area=%0.3f)' % roc_auc)  # FPR on x, TPR on y
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('假正例率')
plt.ylabel('真正例率')
plt.title('Stroke Model')
plt.legend(loc='lower right')
plt.savefig('./data/img/KNN_ROC.jpg')
plt.show()

# Precision, recall and F1 computed from the predictions written into test.csv.
test = pd.read_csv('./data/datasets/test.csv')
y_true = np.array(test['stroke'].tolist())
y_pred = np.array(test['predict'].tolist())

p = precision_score(y_true, y_pred, average='binary')
r = recall_score(y_true, y_pred, average='binary')
f1score = f1_score(y_true, y_pred, average='binary')

end = time.time()
print('查准率: \t', p)
print('查全率: \t', r)
print('F1数值: \t', f1score)
print('-----------------------分类报告-----------------------')
print(classification_report(y_true, y_pred))
print('------------------------------------------------------')
print("\t\t\t\t\t\t\t\t\t\t 用时 {} s".format(round(end - start, 3)))

# ---- Model optimization: ensemble voting over LR / SVC / RF ----
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier

# NOTE(review): this path differs from the './data/datasets/train.csv' used
# above — confirm './data/datasets.csv' is the intended file.
train = pd.read_csv('./data/datasets.csv')
predictors = train.drop(['stroke', 'id'], axis=1)
target = train["stroke"]
# BUG FIX: the original set x = train, which leaked the 'stroke' label (and
# the 'id' column) into the feature matrix used for cross-validation below,
# inflating every reported accuracy.
x = predictors
y = target

# Base learners for the voting ensembles (deliberately small/regularized).
model1 = LogisticRegression(C=0.1)
model2 = SVC(C=0.1, probability=True)  # probability=True is required for soft voting
model3 = RandomForestClassifier(n_estimators=10, max_depth=2)

from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import cross_val_score  # cross-validation

base_estimators = [('LR', model1), ('SVC', model2), ('RF', model3)]
score_labels = ['LR', 'SVC', 'RF', 'Voting']

# Score each base learner and the ensemble under hard voting, then soft voting.
for header, voting_kind in (("硬投票:", 'hard'), ("\n软投票:", 'soft')):
    print(header)
    ensemble_model = VotingClassifier(estimators=base_estimators, voting=voting_kind)
    for model, label in zip([model1, model2, model3, ensemble_model], score_labels):
        scores = cross_val_score(model, x, y, cv=5, scoring='accuracy')
        print('{}准确率平均数:{}'.format(label, scores.mean()))

import joblib

# Load a previously persisted model; on the first run, persist the RF base
# learner and reload it so both paths leave `model` loaded from disk.
try:
    model = joblib.load('vot.model')
    print("模型加载成功")
except Exception:  # narrowed from a bare except: typically FileNotFoundError
    joblib.dump(model3, 'vot.model')
    model = joblib.load('vot.model')
    print("模型创建成功")

# BUG FIX: the original zipped two models against the single-label list
# ['RF'], so zip truncated to one pair and the voting ensemble was silently
# never cross-validated; the loop variable also clobbered `model`.
modelgsKNN = VotingClassifier(estimators=[('RF', model)], voting='hard')
for clf, label in zip([model, modelgsKNN], ['RF', 'Voting']):
    scores = cross_val_score(clf, x, y, cv=5, scoring='accuracy')
    print('{}准确率平均数:{}'.format(label, scores.mean()))

# Fit the final voting model on the full training data.
modelgsKNN.fit(predictors, target)

# Classification results on the test split. errors='ignore' keeps a fresh
# test.csv without a 'predict' column from crashing the drop (the column is
# only present after a previous run wrote it back).
ids = test['id']
predictions = modelgsKNN.predict(test.drop(['stroke', 'id', 'predict'], axis=1, errors='ignore'))

# Save the (id, predict) pairs to submission_RF.csv.
output = pd.DataFrame({'id': ids, 'predict': predictions})
output.to_csv('./data/submission_RF.csv', index=False)

# Write the predictions back into test.csv as a 'predict' column.
data = pd.read_csv(r'./data/datasets/test.csv')
data1 = pd.read_csv(r'./data/submission_RF.csv')
data['predict'] = data1['predict']
data.to_csv(r'./data/datasets/test.csv', index=False, sep=',')



