#!/usr/bin/env python
# coding: utf-8

# In[2]:


import warnings
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
#
import tensorflow as tf
from sklearn.preprocessing import StandardScaler

from tensorflow.python.keras import layers, models
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense
#
# get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import RFE
from sklearn.linear_model import RidgeCV, LassoCV, Ridge, Lasso

warnings.filterwarnings("ignore")
from matplotlib import pyplot as plt
from sklearn import tree
import pandas as pd
import numpy as np
import plotly.express as px
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score

# In[5]:


# Load the heart-disease dataset; gb18030 covers Chinese-encoded CSV files.
df = pd.read_csv('D:/heart.csv', encoding='gb18030')
# Peek at the first five rows to sanity-check the load (displays only in a notebook).
df.head()

# In[6]:


# Impute missing values with each column's mean.
# numeric_only=True keeps DataFrame.mean() from raising the TypeError that
# pandas >= 2.0 produces when non-numeric columns are present.
df.fillna(df.mean(numeric_only=True), inplace=True)

# In[7]:


# Split into train / validation / test (64% / 16% / 20% of the rows).
train_df, test_df = train_test_split(df, test_size=0.2, random_state=42)
train_df, val_df = train_test_split(train_df, test_size=0.2, random_state=42)

# Persist each split so later cells can re-read them from disk.
for filename, split in (('train.csv', train_df), ('val.csv', val_df), ('test.csv', test_df)):
    split.to_csv(filename, index=False)

# Round-trip the train/validation splits and echo them to confirm the files load.
train_df = pd.read_csv('train.csv')
val_df = pd.read_csv('val.csv')
print(train_df)
print(val_df)

# In[8]:


# Regress the 'smoking' column on every other column with ordinary least
# squares, then report the mean squared error on the validation split.
train_features = train_df.drop('smoking', axis=1)
train_labels = train_df['smoking']
val_features = val_df.drop('smoking', axis=1)
val_labels = val_df['smoking']

# Fit the linear model on the training split.
model = LinearRegression()
model.fit(train_features, train_labels)

# Predict the validation split and compute the squared-error mean by hand.
pred_labels = model.predict(val_features)
mse = ((pred_labels - val_labels) ** 2).mean()
print('均方误差:', mse)
# This cell reads the train/validation splits, extracts features and labels,
# trains a LinearRegression model, predicts on the validation set, and
# prints the resulting mean squared error.


# In[9]:


# 提取特征和标签
train_features = train_df.drop('serum_creatinine', axis=1)
train_labels = train_df['serum_creatinine']
val_features = val_df.drop('serum_creatinine', axis=1)
val_labels = val_df['serum_creatinine']
# 训练模型
model = LinearRegression()
model.fit(train_features, train_labels)
# 预测并计算误差
pred_labels = model.predict(val_features)
mue = ((pred_labels - val_labels) ** 2).mean()
print('均方误差:', mue)

# In[10]:

# In[11]:


# Summary statistics (count/mean/std/quartiles) for every numeric column.
# NOTE(review): as a bare expression this result is discarded when run as a
# plain script; it only renders inside a notebook.
df.describe()

# In[12]:


# (rows, columns) shape of the DataFrame.
df.shape

# In[13]:


# Per-column count of missing values.
df.isnull().sum()

# In[14]:


# Number of fully duplicated rows.
df.duplicated().sum()

# In[15]:


# Pairwise correlation heatmap of all numeric features.
fig, ax = plt.subplots()
plt.title('Correlationship of factors')
fig.set_size_inches((10, 10))
# numeric_only=True keeps DataFrame.corr() from raising on non-numeric
# columns (required behavior since pandas 2.0).
sns.heatmap(df.corr(numeric_only=True), square=True, annot=True)
# Render the figure in script mode, consistent with the next cell.
plt.show()
# Observation: apart from a clear sex/smoking association, the features
# show little correlation with one another.


# In[16]:


# Rank every feature's correlation with the death event and draw it as a
# single-column annotated heatmap.
death_corr = df.corr()[['DEATH_EVENT']].sort_values(by='DEATH_EVENT', ascending=False)
sns.heatmap(death_corr, annot=True)
plt.title('Correlation Heatmap of Heart Failure Prediction')
plt.show()
# Serum creatinine / creatinine phosphokinase levels, age, high blood
# pressure and anaemia all correlate positively with cardiovascular death.


# In[17]:


num_col = ["age", "creatinine_phosphokinase", "ejection_fraction", "platelets", "serum_creatinine", "serum_sodium"]

# One figure per numeric feature: histogram + KDE on the left,
# box plot for outlier inspection on the right.
for i in num_col:
    fig, axs = plt.subplots(1, 2, figsize=(15, 3))
    # Bug fix: plt.title() only titles the current (right-hand) axes;
    # fig.suptitle() labels the whole figure with the feature name.
    fig.suptitle(i)
    sns.histplot(data=df[i], bins=20, kde=True, ax=axs[0])
    sns.boxplot(data=df[i], ax=axs[1], color='#99befd', fliersize=1)

# In[18]:


# Winsorize outliers: clamp each listed feature to [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
data = df.copy()
features_with_outliers = ['creatinine_phosphokinase', 'ejection_fraction', 'platelets', 'serum_creatinine',
                          'serum_sodium']
for f in features_with_outliers:
    q1 = data[f].quantile(0.25)
    q3 = data[f].quantile(0.75)
    IQR = q3 - q1
    lower_limit = q1 - (IQR * 1.5)
    upper_limit = q3 + (IQR * 1.5)
    # Series.clip applies both bounds in a single vectorized call,
    # replacing the two boolean-mask .loc assignments.
    data[f] = data[f].clip(lower=lower_limit, upper=upper_limit)

# Re-plot the cleaned features ('time' added here) to confirm the clamping.
num_col = ["age", "creatinine_phosphokinase", "ejection_fraction", "platelets", "serum_creatinine", "serum_sodium",
           "time"]
for i in num_col:
    fig, axs = plt.subplots(1, 2, figsize=(15, 3))
    sns.histplot(data=data[i], bins=20, kde=True, ax=axs[0])
    sns.boxplot(data=data[i], ax=axs[1], color='#99befd', fliersize=1)

# In[19]:


# Manual min-max normalization demo: treat the leading rows as a "training"
# set and the rest as data awaiting judgement.
# NOTE(review): df.loc[:100] is label-based and INCLUSIVE, so this takes 101
# rows, and row 100 lands in BOTH train_x and judge_data — confirm whether
# iloc[:100] / iloc[100:] was intended.
train_x = df.loc[:100, :]  # intended: the first 100 rows
judge_data = df.loc[100:, :]  # data to be judged
print(judge_data)
# Column-wise maxima and minima of the training slice.
x_max = train_x.max(axis=0)
x_min = train_x.min(axis=0)  # axis=1 would instead reduce per row
print(x_max, "\n", x_min)
# Coerce everything to numeric first; string-typed columns cannot take part
# in the subtraction below and would raise.
train_x = train_x.apply(pd.to_numeric)
x_min = x_min.apply(pd.to_numeric)
judge_data = judge_data.apply(pd.to_numeric)
x_max = x_max.apply(pd.to_numeric)  # conversion finished
standardization = (train_x - x_min) / (x_max - x_min)
# Column index 1 is treated as an inverse (negative) indicator, so its scale is flipped.
standardization.iloc[:, 1] = (x_min[1] - train_x.iloc[:, 1]) / (x_max[1] - x_min[1])  # training-set normalization done
# NOTE(review): 10 labels (five 0s, five 1s) do not line up with the ~101
# rows of train_x — verify what y_0 is meant to label.
y_0 = np.hstack([np.zeros(5), np.ones(5)])  # label vector: 5 zeros then 5 ones
print("训练集数据标准化已完成", "\n", standardization)
# Normalize the to-be-judged data with the TRAINING min/max (correct: reuse
# training statistics rather than recomputing them on judge_data).
judge_data = (judge_data - x_min) / (x_max - x_min)
# Same inverse-indicator handling for column index 1.
judge_data.iloc[:, 1] = (x_min[1] - judge_data.iloc[:, 1]) / (x_max[1] - x_min[1])
#


# In[24]:


# Candidate feature matrix without the smoking column.
# NOTE(review): x is not referenced anywhere later in this file — the
# classifiers below all train on X; confirm x can be removed.
x = df.drop('smoking', axis=1)
# Target: the death event.
y = df['DEATH_EVENT']

# Feature matrix: every column except the target.
# NOTE(review): the original comment claimed the data is standardized here,
# but no scaler is applied — X is used raw until the Keras cell.
X = df.drop('DEATH_EVENT', axis=1)
# 299 samples in total; test_size=20/299 reserves exactly 20 rows for the
# test set (the original comment saying "20 as training" was wrong).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=20 / 299)

# In[21]:


# Decision-tree classifier for the death event.
clf = tree.DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=10,
                                  min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.2)

clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("决策树准确率：")
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
# Fixed format spec: '{:2f}' (minimum field width 2) -> '{:.2f}'
# (two decimal places), matching the SVM and Naive Bayes cells below.
print('模型得分: {:.2f}'.format(clf.score(X_test, y_test)))
y_ = np.array(y_test)
print("决策树预测结果:", clf.predict(X_test))
print('真实结果:     ', y_)
###
# Count how many test rows were predicted positive.
pos_samples = np.sum(y_pred == 1)

# Count how many were predicted negative.
neg_samples = np.sum(y_pred == 0)

print("Positive samples:", pos_samples)
print("Negative samples:", neg_samples)
print('--------------------------------------------------------------------------------------')

# In[43]:


# Support-vector-machine classifier on the same split, fixed seed for
# reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=20 / 299, random_state=0)
svm = SVC(probability=True, random_state=0)
y_pred = svm.fit(X_train, y_train).predict(X_test)
print('SVM准确率')
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
print('模型得分:{:.2f}'.format(svm.score(X_test, y_test)))
y_ = np.array(y_test)
print('SVM预测结果：', svm.predict(X_test))
print('真实结果:    ', y_)

# Tally predicted positives and negatives on the test set.
pos_samples = np.sum(y_pred == 1)
neg_samples = np.sum(y_pred == 0)

print("Positive samples:", pos_samples)
print("Negative samples:", neg_samples)
print('--------------------------------------------------------------------------------------')


# In[41]:


# Gaussian Naive Bayes classifier on the same split and seed.

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=20 / 299, random_state=0)
# Instantiate, fit, and predict in separate, readable steps.
gnb = GaussianNB()
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
print("贝叶斯准确率：")
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
print('模型得分:{:.2f}'.format(gnb.score(X_test, y_test)))
y_ = np.array(y_test)
print('贝叶斯预测结果：', gnb.predict(X_test))
print('真实结果:    ', y_)

# Tally predicted positives and negatives on the test set.
pos_samples = np.sum(y_pred == 1)
neg_samples = np.sum(y_pred == 0)

print("Positive samples:", pos_samples)
print("Negative samples:", neg_samples)
print('--------------------------------------------------------------------------------------')


# In[39]:


# Preprocess: every column but the last is a feature, the last is the label.
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
# Standardize features to zero mean / unit variance for the network.
sc = StandardScaler()
X = sc.fit_transform(X)

# Build the network through the public `tensorflow.keras` API (imported at
# the top of the file) instead of the private `tensorflow.python.keras`
# package, which is unsupported and removed in recent TensorFlow releases.
model = keras.Sequential([
    keras.layers.Dense(64, input_dim=X.shape[1], activation='relu'),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid'),
])
# Binary classification: sigmoid output paired with binary cross-entropy.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# In[40]:


# Train for 100 epochs; validation_split holds out the last 20% of X for
# per-epoch validation metrics.
model.fit(X, y, epochs=100, batch_size=32, validation_split=0.2)

# NOTE(review): the model is evaluated on the same data it was trained on,
# so this loss/accuracy is optimistic — evaluate on a held-out set instead.
loss, accuracy = model.evaluate(X, y)
print('Loss:', loss)
print('Accuracy:', accuracy)