# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory

import os
# Walk the read-only Kaggle input directory and print every file path found.
for root, _dirs, files in os.walk('/kaggle/input'):
    for name in files:
        print(os.path.join(root, name))

# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All" 
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session

import numpy as np    # numeric arrays (re-imported; already imported at the top of the file)
import pandas as pd   # dataframes (re-imported; already imported at the top of the file)
# 1. Load the data
df_heart = pd.read_csv("heart.csv")   # read the heart-disease CSV (expects heart.csv in the working directory)
df_heart.head()                       # preview the first five rows (only rendered in a notebook cell)

df_heart.target.value_counts()        # count rows per class label (per the plot below: 1 = disease, 0 = no disease)

# Visualize how age and maximum heart rate relate to the disease label.
import matplotlib.pyplot as plt    # plotting toolkit

# Split the rows by label once, then plot each group as its own series.
sick = df_heart[df_heart.target == 1]
well = df_heart[df_heart.target == 0]

plt.scatter(sick.age, sick.thalach, c="red")      # diseased patients: red dots
plt.scatter(well.age, well.thalach, marker="^")   # healthy patients: triangles

plt.legend(["Disease", "No Disease"])   # legend entries follow plotting order
plt.xlabel("Age")
plt.ylabel("Heart Rate")
plt.show()  # render the scatter plot

# 2. Build the feature matrix and label vector
X = df_heart.drop(['target'], axis=1)            # features: every column except the label
y = df_heart.target.values.reshape(-1, 1)        # labels as an (n, 1) column vector (-1 infers n)
print("张量X的形状:", X.shape)
print("张量y的形状:", y.shape)

# 3. Split the dataset
# Hold out 20% of the rows for testing (an 80/20 split).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)  # NOTE(review): no random_state, so the split (and all accuracies below) differ between runs


# 4. Feature scaling, using sklearn's built-in scaler
from sklearn.preprocessing import MinMaxScaler  # min-max normalization to [0, 1]

# The min/max (and any other scaling statistics) must come from the training
# set only; the very same transform is then applied to both splits so the
# test set never leaks into the fit.
scaler = MinMaxScaler()
scaler.fit(X_train)                     # learn per-feature min/max from the training data
X_train = scaler.transform(X_train)     # apply to the training set
X_test = scaler.transform(X_test)       # apply the identical transform to the test set

# 4.2.3 建立逻辑回归模型

# 1. The logistic (sigmoid) function, used by the model code below.
def sigmoid(z):
    """Squash a logit z (scalar or array) into the open interval (0, 1)."""
    return 1.0 / (1.0 + np.exp(-z))


# 2. The loss function: mean binary cross-entropy over the dataset.
def loss_function(X, y, w, b):
    """Return the average cross-entropy loss of model (w, b) on (X, y)."""
    y_hat = sigmoid(np.dot(X, w) + b)                          # predicted probabilities from the linear model
    log_likelihood = y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)
    return -np.sum(log_likelihood) / X.shape[0]                # negate and average over the n samples


# 3. Gradient-descent training loop
def gradient_descent(X, y, w, b, lr, iter):
    """Fit logistic-regression parameters by batch gradient descent.

    X: (n, d) feature matrix; y: (n, 1) labels in {0, 1}.
    w: (d, 1) initial weights; b: scalar initial bias.
    lr: learning rate (alpha); iter: number of iterations.
    (`iter` shadows the builtin but is kept for interface compatibility.)

    Returns (l_history, w_history, b_history): per-iteration loss,
    weights, and bias, each indexed by iteration.
    """
    l_history = np.zeros(iter)                             # loss after each update
    w_history = np.zeros((iter, w.shape[0], w.shape[1]))   # weight snapshot after each update
    b_history = np.zeros(iter)                             # bias snapshot after each update
    for i in range(iter):
        y_hat = sigmoid(np.dot(X, w) + b)                  # forward pass: predicted probabilities
        # FIX: removed the per-iteration `loss` array that was computed here
        # and never read (l_history is filled via loss_function below);
        # it wasted work and could emit log(0) warnings.
        derivative_w = np.dot(X.T, ((y_hat-y)))/X.shape[0]     # gradient of mean cross-entropy w.r.t. w
        derivative_b = np.sum(y_hat-y)/X.shape[0]              # gradient w.r.t. b
        w = w - lr * derivative_w                           # update weights
        b = b - lr * derivative_b                           # update bias
        l_history[i] = loss_function(X, y, w, b)            # loss with the freshly updated parameters
        print("轮次:", i+1, "当前轮训练集损失:", l_history[i])
        w_history[i] = w   # note w_history is (iter, d, 1) while w is (d, 1)
        b_history[i] = b
    return l_history, w_history, b_history


    # 先定义一个负责分类预测的函数
def predict(X, w, b):              # 定义预测函数
    z = np.dot(X, w) + b           # 线性函数
    y_hat = sigmoid(z)             # 逻辑函数转换
    y_pred = np.zeros((y_hat.shape[0], 1))      # 初始化预测结果变量
    for i in range(y_hat.shape[0]):
        if y_hat[i, 0] < 0.5:
            y_pred[i, 0] = 0         # 如果预测概率小于0.5,输出分类0
        else:
            y_pred[i, 0] = 1         # 如果预测概率大于0.5，输出分类0
    return y_pred                   # 返回预测分类的结果


def logistic_regression(X, y, w, b, lr, iter):     # logistic-regression model: train, report, return history
    """Train on (X, y) via gradient descent and report training accuracy.

    Returns (l_history, w_history, b_history) from gradient_descent.
    """
    l_history, w_history, b_history = gradient_descent(X, y, w, b, lr, iter)    # run the optimizer
    print("训练最终损失:", l_history[-1])                                     # final training loss
    y_pred = predict(X, w_history[-1], b_history[-1])                      # predict with the final parameters
    # BUG FIX: accuracy was previously computed against the global y_train
    # instead of the y argument, so calling this with any other dataset
    # reported a wrong (or crashed on a shape-mismatched) accuracy.
    training_acc = 100 - np.mean(np.abs(y_pred - y))*100                  # % of correctly classified samples
    print("逻辑回归训练准确率: {:.2f}%".format(training_acc))
    return l_history, w_history, b_history                                # full training history


# Prepare the initial parameter values.
# Initialize parameters
dimension = X.shape[1]         # number of features (columns of X; equals X_train's column count)
weight = np.full((dimension, 1), 0.1)   # weights as a 2-D (dimension, 1) column so np.dot(X, w) yields (n, 1)
bias = 0
# Initialize hyperparameters
alpha = 1                       # learning rate
iterations = 500                # number of gradient-descent iterations


# Train: run the hand-rolled logistic regression on the scaled training data.
loss_history, weight_history, bias_history = logistic_regression(X_train, y_train, weight, bias, alpha, iterations)


# Evaluate the trained hand-rolled model on the held-out test set.
y_pred = predict(X_test, weight_history[-1], bias_history[-1])     # classify the test set with the final parameters
testing_acc = 100 - np.mean(np.abs(y_pred - y_test))*100           # % of correctly classified test samples
print("逻辑回归测试准确率: {:.2f}%".format(testing_acc))

# Replay the parameter history to get the test-set loss at every iteration.
loss_history_test = np.array(
    [loss_function(X_test, y_test, w_i, b_i)
     for w_i, b_i in zip(weight_history, bias_history)])

# Plot the training and test loss curves over the iterations.
steps = np.arange(0, iterations, 1)
plt.plot(steps, loss_history, c='blue', linestyle='solid')
plt.plot(steps, loss_history_test, c='red', linestyle='dashed')
plt.legend(["Training Loss", "Test Loss"])
plt.xlabel("Number of Iteration")
plt.ylabel("Cost")
plt.show()                                    # show both curves on one figure


# One-hot encode the three categorical columns (cp, thal, slope) into dummy variables.
dummy_frames = [pd.get_dummies(df_heart[col], prefix=col)
                for col in ('cp', 'thal', 'slope')]

# Append the dummy columns to the dataframe, then drop the originals.
df_heart = pd.concat([df_heart, *dummy_frames], axis=1)
df_heart = df_heart.drop(columns=['cp', 'thal', 'slope'])
df_heart.head()     # preview the widened dataframe (only rendered in a notebook cell)

# Compare with scikit-learn's built-in logistic regression.
from sklearn.linear_model import LogisticRegression   # reference implementation

sk_logreg = LogisticRegression()              # default solver and regularization
sk_logreg.fit(X_train, y_train)               # .fit plays the role of our gradient-descent step
print("SK learn逻辑回归测试的准确率 {:.2f}%".format(sk_logreg.score(X_test, y_test)*100))