import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import recall_score, precision_score, accuracy_score
from sklearn.metrics import classification_report

# Seed numpy's global RNG so the train/test split (which draws from the
# global state when no random_state is given) is reproducible across runs.
np.random.seed(0)

def loadTrainData():
    """Load the breast-cancer dataset and split it into train/test sets.

    Returns:
        X_train, X_test, y_train, y_test: feature matrices of shape
        (N, num_features) and (N,) integer label arrays; y == 0 marks the
        negative class and y == 1 the positive class.
    """
    dataset = load_breast_cancer()  # built-in binary classification data
    # Hold out 20% of the samples for testing.
    return train_test_split(dataset.data, dataset.target, test_size=0.2)


def sigmoid(inx):
    """Numerically safe logistic sigmoid, 1 / (1 + exp(-inx)), element-wise.

    The naive formula overflows in np.exp for large-magnitude negative
    inputs (|inx| > ~709 in float64). Clipping the argument to [-500, 500]
    keeps exp() finite while leaving every representable output value
    unchanged: sigmoid(±500) already rounds to 1.0 / ~7e-218 in float64.

    Args:
        inx: scalar or numpy array of logits.

    Returns:
        Sigmoid of the input, same shape as `inx`.
    """
    z = np.clip(inx, -500, 500)  # prevent overflow in exp()
    return 1.0 / (1.0 + np.exp(-z))

# 初始化参数
def initialize_para(dim):
    mu = 0
    sigma = 0.1
    np.random.seed(0)
    # 使用均值为mu，方差为sigma的正态分布初始化权重矩阵w
    w = np.random.normal(mu, sigma, dim)
    w = np.reshape(w, (dim, 1))
    b = 0
    return w, b


# Forward pass + gradients
def propagate(w, b, X, Y):
    """One forward/backward pass of logistic regression.

    Args:
        w: (num_features, 1) weight column vector.
        b: scalar bias.
        X: (m, num_features) design matrix.
        Y: (m,) or (m, 1) array of 0/1 labels.

    Returns:
        (grads, cost, A): grads is {"dw": (num_features, 1) array,
        "db": scalar}; cost is the scalar mean cross-entropy; A is the
        (m, 1) array of predicted probabilities.
    """
    # eps keeps log() away from 0
    eps = 1e-5
    m = X.shape[0]
    # BUG FIX: reshape the labels to a column ONCE so every expression
    # below stays (m, 1). The original mixed the (m,) label vector with
    # the (m, 1) activations, which silently broadcast to an (m, m)
    # matrix in the cost and db computations (db came out m times too
    # large and the cost was wrong); only dw reshaped correctly.
    Yc = np.asarray(Y).reshape(-1, 1)
    # Forward pass: X (m, num_features) @ w (num_features, 1) -> (m, 1).
    A = sigmoid(np.dot(X, w) + b)
    # Mean binary cross-entropy over the batch.
    cost = -1 / m * np.sum(Yc * np.log(A + eps) + (1 - Yc) * np.log(1 - A + eps))
    # Gradients of the mean cost w.r.t. w and b.
    dw = 1 / m * np.dot(X.T, A - Yc)
    db = 1 / m * np.sum(A - Yc)
    cost = np.squeeze(cost)

    grads = {"dw": dw,
             "db": db}
    # Return the gradients, the loss, and the activations.
    return grads, cost, A


# num_iterations: number of gradient-descent steps
# learning_rate: step size
def optimize(w, b, X, Y, num_iterations, learning_rate):
    """Fit the parameters with plain (full-batch) gradient descent.

    Args:
        w: initial (num_features, 1) weight vector.
        b: initial scalar bias.
        X: (m, num_features) training data.
        Y: (m,) integer 0/1 labels.
        num_iterations: number of descent steps.
        learning_rate: step size.

    Returns:
        (params, costs): params is {"w": ..., "b": ...} after training;
        costs holds the loss recorded every 100 iterations.
    """
    costs = []  # loss history, sampled every 100 steps

    for step in range(num_iterations):
        grads, cost, probs = propagate(w, b, X, Y)

        # Gradient-descent update (rebinds w/b; the caller's arrays are
        # not mutated in place).
        w = w - learning_rate * grads["dw"]
        b = b - learning_rate * grads["db"]

        # Every 100 steps, record the loss and report training accuracy.
        if step % 100 == 0:
            costs.append(cost)

            # Threshold the probabilities at 0.5, then flatten to (m,)
            # integers so the comparison with Y is element-wise.
            probs[probs >= 0.5] = 1
            probs[probs < 0.5] = 0
            labels = probs.astype(np.int_).squeeze()
            acc = np.sum(labels == Y) / len(Y)
            print("Iteration:{} Loss = {}, Acc = {}".format(step, cost, acc))

    # Final parameter values.
    params = {"w": w,
              "b": b}

    return params, costs

def predict(w, b, X):
    """Predict 0/1 labels for each row of X using a 0.5 threshold.

    Args:
        w: weight vector with X.shape[1] elements (any shape).
        b: scalar bias.
        X: (m, num_features) samples.

    Returns:
        (m,) array of 0.0/1.0 predictions.
    """
    num_samples = X.shape[0]
    # Coerce w into a column vector so the product below is (m, 1).
    w = w.reshape(X.shape[1], 1)

    # Predicted probability of the positive class for every sample.
    probabilities = sigmoid(np.dot(X, w) + b)

    # Start from all zeros and switch on the samples at or above the
    # threshold; everything else keeps its initial 0.
    predictions = np.zeros((num_samples, 1))
    predictions[probabilities >= 0.5] = 1

    return predictions.squeeze()


# Training plus evaluation
def Logisticmodel(X_train, Y_train, X_test, Y_test, num_iterations=1000, learning_rate=0.1):
    """Train a logistic-regression classifier and print test-set metrics.

    Args:
        X_train, Y_train: training features (N, num_features) and labels (N,).
        X_test, Y_test: held-out features and labels.
        num_iterations: gradient-descent steps (default 1000).
        learning_rate: step size (default 0.1).

    Returns:
        dict with the loss history, train/test predictions, fitted
        parameters, and the hyperparameters used.
    """
    # Initialize w and b, then run gradient descent to fit them.
    w, b = initialize_para(X_train.shape[1])
    parameters, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate)
    w, b = parameters["w"], parameters["b"]

    # Predictions on both splits.
    Y_prediction_train = predict(w, b, X_train)
    Y_prediction_test = predict(w, b, X_test)

    # Test-set evaluation.
    accuracy_score_value = accuracy_score(Y_test, Y_prediction_test)
    recall_score_value = recall_score(Y_test, Y_prediction_test)
    precision_score_value = precision_score(Y_test, Y_prediction_test)
    classification_report_value = classification_report(Y_test, Y_prediction_test)

    print("准确率:", accuracy_score_value)
    print("召回率:", recall_score_value)
    print("精确率:", precision_score_value)
    print(classification_report_value)

    return {"costs": costs,
            "Y_prediction_test": Y_prediction_test,
            "Y_prediction_train": Y_prediction_train,
            "w": w,
            "b": b,
            "learning_rate": learning_rate,
            "num_iterations": num_iterations}


if __name__ == '__main__':
    # Load the data, then train the model and report its test metrics.
    train_X, test_X, train_y, test_y = loadTrainData()
    Logisticmodel(train_X, train_y, test_X, test_y)
