

# 逻辑回归：在输出明确的离散分类值之前，算法首先输出的其实是一个可能性，可以把这个可能性理解成一个概率，然后根据概率输出离散标签值
# 简单使用线性回归 + 阶跃函数有局限性，极端样本对分类函数会产生较大影响，需要对极端样本数据不敏感的函数，即逻辑函数（logistic function）
# 使用sigmoid函数做逻辑分类，y_hat=g(z)=1/(1+np.exp(-z)) 输出结果是一个0-1的数字，代表着分类概率
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

# 数据缩放器，进行数据归一化，使数据在一个较小区间内
from sklearn.preprocessing import MinMaxScaler

# 导入逻辑回归模型
from sklearn.linear_model import LogisticRegression

def logic_regression_demo():
    """Train a from-scratch logistic-regression classifier on the heart-disease data.

    Loads heart.csv, one-hot encodes the categorical columns, scales the
    features to [0, 1], runs batch gradient descent, reports train/test
    accuracy, and plots the training and test loss curves.
    """
    df_heart = pd.read_csv("heart.csv")

    # One-hot encode the 3 integer-coded categorical columns into dummy variables.
    a = pd.get_dummies(df_heart['cp'], prefix="cp")
    b = pd.get_dummies(df_heart['thal'], prefix="thal")
    c = pd.get_dummies(df_heart['slope'], prefix="slope")

    # Append the dummy columns, then drop the original categorical ones.
    frames = [df_heart, a, b, c]
    df_heart = pd.concat(frames, axis=1)
    df_heart = df_heart.drop(columns=['cp', 'thal', 'slope'])

    print(df_heart.head())  # show the first 5 rows (was printed twice in the original)
    print(df_heart.target.value_counts())  # class labels and how many samples each has

    # Scatter plot of age vs. max heart rate, colored by class.
    plt.scatter(x=df_heart.age[df_heart.target==1],
                y=df_heart.thalach[(df_heart.target==1)],
                c="red")
    plt.scatter(x=df_heart.age[df_heart.target==0],
                y=df_heart.thalach[(df_heart.target==0)], marker='^')
    # plt.legend(["Disease", "No Disease"]) # show legend
    plt.xlabel("Age")  # x-axis label
    plt.ylabel("Heart Rate")  # y-axis label
    plt.show()

    X = df_heart.drop(['target'], axis=1)  # feature set
    y = df_heart.target.values  # label set (fixed: the original comment said "feature set")
    y = y.reshape(-1, 1)  # make y a column vector; -1 lets numpy infer len(y)
    print('张量X的形状: ', X.shape)
    print('张量y的形状: ', y.shape)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)  # split the dataset
    scaler = MinMaxScaler()  # min-max normalization
    X_train = scaler.fit_transform(X_train)  # fit on training data only…
    X_test = scaler.transform(X_test)  # …then apply the same scaling to the test set

    dimension = X.shape[1]  # number of features (columns), not rows
    weight = np.full((dimension, 1), 0.1)  # weight vector stored as a (d, 1) 2-D tensor
    bias = 0  # bias term
    # Hyperparameters.
    alpha = 1  # learning rate
    iterations = 500  # number of gradient-descent iterations

    # Define the logistic-regression training routine.
    def logistic_regression(X, y, w, b, lr, iter):
        """Run gradient descent and report the final loss and training accuracy."""
        l_history, w_history, b_history = gradient_descent(X, y, w, b, lr, iter)
        print("训练最终损失: ", l_history[-1])  # final training loss
        y_pred = predict(X, w_history[-1], b_history[-1])  # predictions on the training data
        # Fixed: compare against the `y` parameter, not the closed-over y_train.
        training_acc = 100 - np.mean(np.abs(y_pred - y)) * 100
        # Fixed format spec: "{:2f}" (field width 2) -> "{:.2f}" (2 decimal places).
        print("逻辑回归训练准确率: {:.2f}%".format(training_acc))
        return l_history, w_history, b_history

    # Train and keep the full history of loss, weights and bias.
    loss_history, weight_history, bias_history = logistic_regression(X_train, y_train, weight, bias, alpha, iterations)

    y_pred = predict(X_test, weight_history[-1], bias_history[-1])  # predict the test set
    testing_acc = 100 - np.mean(np.abs(y_pred - y_test)) * 100  # test accuracy
    print("逻辑回归测试准确率: {:.2f}%".format(testing_acc))
    print("逻辑回归预测分类值: ", predict(X_test, weight_history[-1], bias_history[-1]))

    # Plot the loss curves: recompute the test loss at every saved parameter snapshot.
    loss_history_test = np.zeros(iterations)
    for i in range(iterations):
        loss_history_test[i] = loss_function(X_test, y_test, weight_history[i], bias_history[i])
    index = np.arange(0, iterations, 1)
    plt.plot(index, loss_history, c='blue', linestyle='solid')
    plt.plot(index, loss_history_test, c='red', linestyle='dashed')
    plt.legend(["Training Loss", "Test Loss"])
    plt.xlabel("Number of Iteration")
    plt.ylabel("Cost")
    plt.show()


# Classification prediction function.
def predict(X, w, b):
    """Predict binary class labels for X.

    Args:
        X: feature matrix, shape (n_samples, n_features).
        w: weight column vector, shape (n_features, 1).
        b: scalar bias.

    Returns:
        (n_samples, 1) float array of 0.0/1.0 labels, thresholding the
        sigmoid probability at 0.5 (probability < 0.5 -> class 0, else 1).
    """
    z = np.dot(X, w) + b  # linear combination
    y_hat = sigmoid(z)    # probability in (0, 1)
    # Vectorized threshold replaces the original per-row Python loop;
    # astype(np.float64) matches the dtype of the original np.zeros buffer.
    y_pred = (y_hat >= 0.5).astype(np.float64)
    return y_pred



# Gradient-descent training loop for logistic regression.
def gradient_descent(X, y, w, b, lr, iter):
    """Run `iter` steps of batch gradient descent.

    Args:
        X: feature matrix, shape (n_samples, n_features).
        y: label column vector, shape (n_samples, 1).
        w: initial weights, shape (n_features, 1).
        b: initial scalar bias.
        lr: learning rate.
        iter: number of iterations.

    Returns:
        (l_history, w_history, b_history): per-iteration loss, weights and bias.
    """
    l_history = np.zeros(iter)  # loss after each iteration
    w_history = np.zeros((iter, w.shape[0], w.shape[1]))  # weights after each iteration
    b_history = np.zeros(iter)  # bias after each iteration

    for i in range(iter):
        # Forward pass: sigmoid of the linear function (w * X + b).
        y_hat = sigmoid(np.dot(X, w) + b)
        # (Removed a dead per-sample loss computation that was never used;
        #  l_history is filled via loss_function after the parameter update.)
        derivative_w = np.dot(X.T, (y_hat - y)) / X.shape[0]  # gradient w.r.t. the weights
        derivative_b = np.sum(y_hat - y) / X.shape[0]  # gradient w.r.t. the bias
        w = w - lr * derivative_w  # weight update, lr is the learning rate
        b = b - lr * derivative_b  # bias update
        l_history[i] = loss_function(X, y, w, b)  # loss at the updated parameters
        print("轮次", i + 1, "当前轮训练集损失: ", l_history[i])
        w_history[i] = w  # note w_history holds a (d, 1) snapshot per iteration
        b_history[i] = b
    return l_history, w_history, b_history

# Sigmoid function: takes z, returns y' = P(class 1).
def sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^(-z)), mapping any real z into (0, 1).

    The input is clipped so np.exp cannot overflow float64 (which would
    raise a RuntimeWarning for very negative z); the returned values are
    unchanged for all practical inputs.
    """
    z = np.clip(z, -709.0, 709.0)  # exp(709) is just below the float64 max
    y_hat = 1.0 / (1.0 + np.exp(-z))
    return y_hat

# Loss function for logistic regression.
def loss_function(X, y, w, b):
    """Return the mean binary cross-entropy of the model (w, b) over X, y."""
    probabilities = sigmoid(np.dot(X, w) + b)  # predicted P(y=1) per sample
    cross_entropy = -(y * np.log(probabilities) + (1 - y) * np.log(1 - probabilities))
    # Average the per-sample losses over the whole dataset.
    return np.sum(cross_entropy) / X.shape[0]


# The same task via the Sklearn library, for comparison.
def logic_regression_sklearn():
    """Fit sklearn's LogisticRegression on heart.csv and print test accuracy."""
    df_heart = pd.read_csv("heart.csv")
    X = df_heart.drop(['target'], axis=1)  # feature set
    y = df_heart.target.values  # label set (fixed: the original comment said "feature set")
    y = y.reshape(-1, 1)  # column vector; -1 lets numpy infer len(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)  # split the dataset
    scaler = MinMaxScaler()  # min-max normalization
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    lr = LogisticRegression()
    # fit() is the sklearn equivalent of running gradient descent.
    # ravel() flattens the (n, 1) labels to 1-D, the shape fit() expects
    # (avoids sklearn's DataConversionWarning).
    lr.fit(X_train, y_train.ravel())
    print("Sklearn 逻辑回归预测准确率 {:.2f}%".format(lr.score(X_test, y_test)*100))

    # Sklearn scores higher: 1. possible overfitting — tune the iteration count;
    # 2. feature engineering is needed — the categorical columns should be
    #    encoded, not fed in as raw integers.

if __name__ == '__main__':
    # Run the from-scratch demo; switch to logic_regression_sklearn() to compare.
    # (Removed the stray trailing semicolon — not idiomatic Python.)
    logic_regression_demo()