import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer


# lin model
def model(x, theta):
    """Linear layer: matrix product of the inputs `x` and weights `theta`."""
    return x @ theta


# sigmoid
def sigmoid(z):
    """Element-wise logistic sigmoid, 1 / (1 + e^(-z)).

    `z` is clipped to [-500, 500] before exponentiation so that
    np.exp never overflows (RuntimeWarning) for large-magnitude
    inputs; the sigmoid is fully saturated long before |z| = 500,
    so the returned values are unchanged for all practical inputs.
    """
    z = np.clip(z, -500, 500)
    return 1 / (1 + np.exp(-z))


# cost function
def cost_func(h, y):
    """Mean binary cross-entropy between predictions `h` and labels `y`.

    `h` is clipped into (eps, 1 - eps) so np.log never returns
    -inf (and the cost never becomes nan) when a prediction
    saturates at exactly 0 or 1.
    """
    m = len(h)  # number of samples (rows)
    eps = 1e-12
    h = np.clip(h, eps, 1 - eps)
    return -1 / m * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))


# forward propagation
def FP(x, theta1, theta2, theta3):
    """Forward pass through the 3-layer network.

    Each layer is an affine map followed by the logistic sigmoid.
    Returns the two hidden activations and the output activation.
    """
    hidden1 = 1 / (1 + np.exp(-(x @ theta1)))
    hidden2 = 1 / (1 + np.exp(-(hidden1 @ theta2)))
    output = 1 / (1 + np.exp(-(hidden2 @ theta3)))
    return hidden1, hidden2, output


# backward propagation
def BP(x, y, theta1, theta2, theta3, a2, a3, a4, alpha):
    """One gradient-descent step via backpropagation.

    Propagates the output error back through the sigmoid layers,
    averages the weight gradients over the batch, and updates the
    three weight matrices in place with learning rate `alpha`.
    Returns the (mutated) weight matrices.
    """
    m = len(x)
    # layer-wise error signals (sigmoid derivative is a * (1 - a))
    delta4 = a4 - y
    delta3 = delta4 @ theta3.T * (a3 * (1 - a3))
    delta2 = delta3 @ theta2.T * (a2 * (1 - a2))
    # batch-averaged gradients
    grad3 = 1 / m * (a3.T @ delta4)
    grad2 = 1 / m * (a2.T @ delta3)
    grad1 = 1 / m * (x.T @ delta2)
    # in-place descent step
    theta3 -= alpha * grad3
    theta2 -= alpha * grad2
    theta1 -= alpha * grad1
    return theta1, theta2, theta3


# gradient descent
def grad(x, y, alpha=0.01, iter0=15000, hidden=(100, 50)):
    """Train the 3-layer sigmoid network with batch gradient descent.

    Parameters
    ----------
    x : (m, n) design matrix (bias column expected to be included).
    y : (m, 1) binary labels.
    alpha : learning rate.
    iter0 : number of gradient-descent iterations.
    hidden : sizes of the two hidden layers (previously hard-coded
             to 100 and 50; this default keeps old behavior).

    Returns the trained weights, the final training predictions,
    and the per-iteration cost history.
    """
    L2, L3 = hidden
    L4 = 1  # single output unit (binary classification)
    # print progress ~20 times; max(1, ...) avoids modulo-by-zero
    # when iter0 < 20
    group = max(1, iter0 // 20)
    m, n = x.shape
    theta1 = np.random.randn(n, L2)
    theta2 = np.random.randn(L2, L3)
    theta3 = np.random.randn(L3, L4)
    J = np.zeros(iter0)
    for i in range(iter0):
        a2, a3, a4 = FP(x, theta1, theta2, theta3)
        J[i] = cost_func(a4, y)
        if i % group == 0:
            print(f'#{i} cost function value = {J[i]}')
        theta1, theta2, theta3 = BP(x, y, theta1, theta2, theta3, a2, a3, a4, alpha)
    print(f'#{i + 1} cost function value = {J[i]}')
    return theta1, theta2, theta3, a4, J


# score function
def score(h, y):
    """Classification accuracy: fraction of samples where the
    0.5-thresholded prediction matches the label."""
    predicted = h > 0.5
    return np.mean(predicted == y)


# load data, train model
# load data, train model
if __name__ == '__main__':
    np.random.seed(1)
    plt.figure(figsize=[16, 12])

    # load the breast-cancer dataset
    dataset = load_breast_cancer()
    x = dataset.data
    m, n = x.shape
    y = dataset.target.reshape(-1, 1)

    # shuffle samples with a fixed-seed permutation
    perm = np.random.permutation(m)
    x, y = x[perm], y[perm]

    # standardize every feature to zero mean and unit variance
    x = (x - x.mean(axis=0)) / x.std(axis=0)

    # 70/30 train/test split, then prepend a bias column of ones
    split_at = int(m * 0.7)
    x_train, x_test = np.split(x, [split_at])
    y_train, y_test = np.split(y, [split_at])
    XX_train = np.c_[np.ones(split_at), x_train]
    XX_test = np.c_[np.ones(m - split_at), x_test]

    # train the network
    alpha = 0.01
    iter0 = 15000
    theta1, theta2, theta3, h_train, J = grad(XX_train, y_train, alpha, iter0)
    print(f'Training score = {score(h_train, y_train)}')

    # plot the cost history over all iterations
    plt.plot(J, label='cost function values')
    plt.grid()
    plt.legend()

    # evaluate on the held-out test set
    a2, a3, h_test = FP(XX_test, theta1, theta2, theta3)
    print(f'Testing score = {score(h_test, y_test)}')

    plt.show()
