import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification


def init_param(shape):
    """
    Initialize the logistic-regression parameters.

    :param shape: number of input features (number of rows of the weight vector)
    :return: tuple ``(w, b)`` — ``w`` is a zero array of shape ``(shape, 1)``,
             ``b`` is the scalar bias initialized to 0
    """
    weights = np.zeros((shape, 1))
    bias = 0
    return weights, bias


def base_sigmoid(z):
    """
    Numerically safe element-wise sigmoid: 1 / (1 + exp(-z)).

    Clips z to [-500, 500] before exponentiating so ``np.exp`` never
    overflows (exp(709) is the float64 limit); values inside the clip
    range are computed exactly as before, and sigmoid is already fully
    saturated (0.0 or 1.0 in float64) well before |z| = 500.

    :param z: scalar or ndarray of logits
    :return: sigmoid of z, same shape as z
    """
    z = np.clip(z, -500, 500)
    return 1 / (1 + np.exp(-z))


def decrese_gradent(w, b, X, Y, learn_rate):
    """
    Perform one step of batch gradient descent for logistic regression.

    :param w: weight vector, shape (n_features, 1); NOTE: updated in place
    :param b: scalar bias
    :param X: feature matrix, shape (n_features, m)
    :param Y: label row vector, shape (1, m)
    :param learn_rate: gradient-descent step size
    :return: ``(w, b, loss, y_pre)`` — updated parameters, the cross-entropy
             loss computed before the update, and the raw predicted
             probabilities of shape (1, m)
    """
    m = Y.shape[1]

    # 1. Forward pass: predicted probabilities, shape (1, m).
    y_pre = base_sigmoid(np.dot(w.T, X) + b)

    # 2. Cross-entropy loss (scalar). Clip probabilities away from 0/1 so
    # np.log never receives 0 (saturated predictions would otherwise
    # produce -inf / nan). The clip is applied only inside the loss; the
    # gradient below still uses the raw predictions.
    eps = 1e-15
    y_safe = np.clip(y_pre, eps, 1 - eps)
    loss = -1 / m * np.sum(Y * np.log(y_safe) + (1 - Y) * np.log(1 - y_safe))

    # 3. Backward pass.
    # dz: (1, m) — gradient of the loss w.r.t. the pre-sigmoid logits.
    dz = y_pre - Y
    # dw: (n_features, 1)
    dw = 1 / m * np.dot(X, dz.T)
    # db: scalar
    db = 1 / m * np.sum(dz)

    # 4. Gradient step (mutates the caller's w array in place).
    w -= learn_rate * dw
    b -= learn_rate * db

    return w, b, loss, y_pre


def optimizer(w, b, X, Y, epoch, learn_rate):
    """
    Optimize the parameters with repeated gradient-descent steps and print
    the training accuracy of the final model.

    :param w: weight vector to optimize, shape (n_features, 1)
    :param b: scalar bias to optimize
    :param X: feature matrix, shape (n_features, m)
    :param Y: label row vector, shape (1, m)
    :param epoch: number of gradient-descent iterations
    :param learn_rate: gradient-descent step size
    :return: the optimized ``(w, b)``
    """
    # None (not "") so a non-positive epoch skips the accuracy report
    # instead of crashing on a string comparison.
    y_pre = None
    for i in range(epoch):
        # One gradient-descent update; y_pre holds the predictions made
        # with the pre-update parameters of this iteration.
        w, b, loss, y_pre = decrese_gradent(w, b, X, Y, learn_rate)

        if i % 100 == 0:
            print(f"第{i + 1}轮的损失为:{loss}")

    if y_pre is not None:
        # Threshold the last predictions at 0.5 and compare with the labels;
        # equivalent to the original threshold/subtract/np.where computation.
        predictions = (y_pre >= 0.5).astype(Y.dtype)
        accuracy = np.mean(predictions == Y)
        print("训练集的准确率为:", accuracy)
    return w, b


def predict(w, b, X, Y):
    """
    Evaluate the trained model: threshold the sigmoid outputs at 0.5 and
    report the accuracy against the true labels.

    :param w: weight vector, shape (n_features, 1)
    :param b: scalar bias
    :param X: feature matrix, shape (n_features, m)
    :param Y: label row vector, shape (1, m)
    :return: accuracy in [0, 1]; previously None was returned, so existing
             callers that ignore the return value are unaffected
    """
    # Predicted probabilities, shape (1, m).
    probs = base_sigmoid(np.dot(w.T, X) + b)
    # Hard 0/1 predictions; boolean comparison replaces the original
    # in-place threshold + subtract + np.where dance with the same result.
    predictions = (probs >= 0.5).astype(Y.dtype)
    accuracy = np.mean(predictions == Y)
    print("测试集的准确率为:", accuracy)
    return accuracy


def model(train_x, train_y, test_x, test_y, train_epoch=1000, learn_rate=0.01):
    """
    Train a logistic-regression model and evaluate it on a held-out set.

    :param train_x: training features, shape (n_features, n_train)
    :param train_y: training labels, shape (1, n_train)
    :param test_x: test features, shape (n_features, n_test)
    :param test_y: test labels, shape (1, n_test)
    :param train_epoch: number of gradient-descent iterations
    :param learn_rate: gradient-descent step size
    :return: None
    """
    # Start from all-zero weights (n_features, 1) and a zero bias.
    n_features = train_x.shape[0]
    w, b = init_param(n_features)

    # Fit the parameters on the training split.
    w, b = optimizer(w, b, train_x, train_y, epoch=train_epoch, learn_rate=learn_rate)

    # Report accuracy on the test split.
    predict(w, b, test_x, test_y)


if __name__ == '__main__':
    # Build a synthetic binary-classification dataset.
    # X: (500, 5), Y: (500,)
    X, Y = make_classification(n_samples=500, n_features=5, n_classes=2)

    # 70/30 train/test split. Note: sklearn returns
    # (train features, test features, train labels, test labels),
    # which differs from keras' ordering.
    x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3)

    # Transpose features to (n_features, n_samples) and reshape labels to
    # row vectors (1, n_samples), matching the model's conventions.
    # x_train: (5, 350)
    x_train = x_train.T
    print(x_train.shape)
    # y_train: (1, 350)
    y_train = y_train.reshape(1, -1)
    print(y_train.shape)
    # x_test: (5, 150)
    x_test = x_test.T
    print(x_test.shape)
    # y_test: (1, 150)
    y_test = y_test.reshape(1, -1)
    print(y_test.shape)

    model(x_train, y_train, x_test, y_test, train_epoch=3000, learn_rate=0.001)