from sklearn.datasets import load_breast_cancer
import numpy as np
import matplotlib.pyplot as plt


def model(XX, theta):
    """Linear model: matrix-vector product of the design matrix XX and theta."""
    return XX @ theta


def sigmoid(z):
    """Logistic function: map any real z (scalar or array) into (0, 1)."""
    denom = 1 + np.exp(-z)
    return 1 / denom


def cost(h, y, theta, lamda):
    """Regularized logistic-regression cost (cross-entropy + L2 penalty).

    :param h: predicted probabilities, shape (m,)
    :param y: binary labels (0/1), shape (m,)
    :param theta: parameter vector; index 0 is the bias term
    :param lamda: L2 regularization strength
    :return: scalar cost value
    """
    m = len(h)

    # Regularization term — by convention the bias theta[0] is NOT penalized,
    # so the sum runs over theta[1:] only.
    r = lamda / (2*m) * np.sum(theta[1:]**2)

    # Clip probabilities away from exactly 0/1 so np.log never yields -inf.
    h = np.clip(h, 1e-15, 1 - 1e-15)

    return r - 1.0 / m * np.sum(y*np.log(h) + (1 - y)*np.log(1 - h))


def grad(x, y, lamda, iter0=5000, alpha=0.01):
    """Fit logistic-regression parameters by batch gradient descent.

    :param x: feature matrix with bias column, shape (m, n)
    :param y: binary labels (0/1), shape (m,)
    :param lamda: L2 regularization strength (the bias is not penalized)
    :param iter0: number of gradient-descent iterations
    :param alpha: learning rate
    :return: (h, theta, J) — predicted probabilities under the final theta,
             the learned parameter vector, and the per-iteration cost history
    """
    m, n = x.shape
    theta = np.zeros(n)
    J = np.zeros(iter0)

    for i in range(iter0):
        z = model(x, theta)
        h = sigmoid(z)
        J[i] = cost(h, y, theta, lamda)

        # Regularization gradient — zero out the bias component so the
        # intercept theta[0] is never shrunk (matches the cost function).
        r = lamda / m * theta
        r[0] = 0

        dt = 1/m * x.T.dot(h - y) + r
        theta -= alpha * dt

    # Recompute predictions with the final theta so the returned h is
    # consistent with the returned parameters (the loop's h is one update stale).
    h = sigmoid(model(x, theta))
    return h, theta, J


def score(h, y):
    """Classification accuracy of probabilities h (thresholded at 0.5) vs labels y."""
    predictions = h > 0.5
    return np.mean(predictions == y)


if '__main__' == __name__:
    # Two feature columns (exam scores) plus a trailing 0/1 label column.
    data = np.loadtxt(r'../../logic_regression/data/ex2data1.txt', delimiter=',')
    print(data[:5])

    features, y = data[:, :-1], data[:, -1]

    # Standardize each feature column to zero mean and unit variance.
    features = (features - features.mean(axis=0)) / features.std(axis=0)

    # Design matrix: a leading column of ones serves as the bias feature.
    XX = np.c_[np.ones(len(features)), features]

    # Fit once without regularization and once with an L2 penalty of 3.
    h0, theta0, J0 = grad(XX, y, lamda=0)
    h3, theta3, J3 = grad(XX, y, lamda=3)
    print(f'theta0 = {theta0}, theta3 = {theta3}')

    # Overlay the two cost histories to compare convergence.
    for history, tag in ((J0, 'J0'), (J3, 'J3')):
        plt.plot(history, label=tag)
    plt.legend()

    s0 = score(h0, y)
    print(f's0 = {s0}')
    s3 = score(h3, y)
    print(f's3 = {s3}')

    plt.show()
