import numpy as np
import matplotlib.pyplot as plt


# model
def model(XX, theta):
    """
    The model

    :param XX: Feature values matrix with bias ones
    :param theta: Theta vector
    :return: z values vector
    """
    return XX.dot(theta)


def sigmoid(z):
    """
    Numerically stable sigmoid function.

    The naive form 1 / (1 + exp(-z)) overflows (RuntimeWarning) when z is a
    large negative number, because exp(-z) exceeds the float64 range.  Using
    the algebraically identical form exp(z) / (1 + exp(z)) for negative z
    keeps every exponent non-positive, so no overflow can occur.

    :param z: z value or vector of z values
    :return: Hypothesis value(s) in (0, 1), same shape as z
    """
    z = np.asarray(z, dtype=float)
    out = np.empty_like(z)
    pos = z >= 0
    # non-negative z: exp(-z) <= 1, safe
    out[pos] = 1.0 / (1.0 + np.exp(-z[pos]))
    # negative z: exp(z) < 1, safe; mathematically equal to 1/(1+exp(-z))
    ez = np.exp(z[~pos])
    out[~pos] = ez / (1.0 + ez)
    return out


# cost function
def cost_func(h, y, theta=None, lam=0):
    """
    Cross-entropy cost with optional L2 regularization.

    :param h: Hypothesis values vector
    :param y: Target values vector (0/1)
    :param theta: Theta vector. Required only when lam != 0.
    :param lam: Regularization strength lambda. 0 disables regularization.
    :return: The scalar value of the cost function.
    """
    m = len(h)
    # unregularized cross-entropy (shared by both cases; previously duplicated)
    ce = -np.sum(y * np.log(h) + (1 - y) * np.log(1 - h)) / m
    if lam == 0:
        return ce
    # L2 penalty; the bias term theta[0] is conventionally not regularized.
    # Copy so the caller's theta is never mutated (also accepts list input).
    theta_cp = np.array(theta, dtype=float, copy=True)
    theta_cp[0] = 0.0
    return ce + lam / (2 * m) * np.sum(theta_cp ** 2)


# gradient descent algorithm with regularization
def gradient_descent_algorithm(XX, y, lam=0, alpha=0.001, iter0=15000):
    """
    Batch gradient descent for logistic regression, with optional L2
    regularization (the bias term theta[0] is not regularized).

    :param XX: Feature matrix including the leading bias column of ones
    :param y: Target values vector (0/1)
    :param lam: Regularization strength lambda. 0 disables regularization.
    :param alpha: The learning rate.
    :param iter0: The iteration count.
    :return: (theta, h, j_his)
        theta: the fitted theta vector
        h: the hypothesis vector for the final theta used in the last iteration
        j_his: cost function value at every iteration (length iter0)
    """
    m, n = XX.shape  # m: training examples; n: features (incl. bias)
    theta = np.zeros(n)
    j_his = np.zeros(iter0)

    # Pre-compute so `h` is defined even when iter0 == 0 (the original code
    # raised NameError on return in that case).
    h = sigmoid(model(XX, theta))
    for i in range(iter0):
        h = sigmoid(model(XX, theta))
        j_his[i] = cost_func(h, y, theta, lam)
        # gradient of the cross-entropy term
        grad = XX.T.dot(h - y) / m
        if lam != 0:
            # L2 term; `lam / m * theta` allocates a fresh array, so zeroing
            # its bias slot does not touch theta itself.
            reg = lam / m * theta
            reg[0] = 0.0
            grad += reg
        theta -= alpha * grad
    return theta, h, j_his


# accuracy rate
def score(h, y):
    """
    Classification accuracy at the 0.5 decision threshold.

    :param h: The hypothesis values vector.
    :param y: The target values vector (0/1).
    :return: Fraction of predictions that match the targets.
    """
    predictions = h > 0.5
    return (y == predictions).mean()


if __name__ == '__main__':
    # hyper-parameters
    np.random.seed(1)
    split_rate = 0.7              # fraction of data used for training
    alpha, iter0 = 0.001, 15000   # learning rate and iteration count
    try_max = 20                  # max number of lambda values to try
    lam_delta = 1                 # lambda increment per attempt

    # load data: feature columns followed by one 0/1 target column, CSV
    data = np.loadtxt(r'../../logic_regression/data/ex2data1.txt', delimiter=',')
    # print(data[:5])

    # feature standardization: (x - mean) / std, per column
    x = data[:, :-1]  # features matrix
    m = x.shape[0]  # data count
    y = data[:, -1]  # target values vector
    mu = x.mean(axis=0)  # per-feature mean
    sigma = x.std(axis=0)  # per-feature standard deviation
    # NOTE(review): `x` is a view into `data`, so these in-place ops also
    # overwrite `data`'s feature columns; `y` was sliced out above and the
    # raw `data` is not used again, so behavior is unaffected here.
    x -= mu
    x /= sigma

    # shuffle features and targets with the same random permutation
    rnd_idx = np.random.permutation(m)
    x = x[rnd_idx]
    y = y[rnd_idx]

    # train/test split
    m_train = int(m * split_rate)
    m_test = m - m_train
    x_train, x_test = np.split(x, [m_train])
    y_train, y_test = np.split(y, [m_train])

    # prepend the bias column of ones
    XX_train = np.c_[np.ones(m_train), x_train]
    XX_test = np.c_[np.ones(m_test), x_test]

    # train, increasing lambda until the test accuracy catches up with the
    # training accuracy (simple overfitting check), or try_max is reached
    lam = 0
    thetas_arr = []
    lams_arr = []
    for i in range(try_max):
        theta, h_train, j_his = gradient_descent_algorithm(XX_train, y_train, lam, alpha, iter0)
        thetas_arr.append(theta)
        lams_arr.append(lam)
        xrate_train = score(h_train, y_train)
        h_test = sigmoid(model(XX_test, theta))
        xrate_test = score(h_test, y_test)
        print(f'#{i}: rate train = {xrate_train}, rate test = {xrate_test}, lam={lam}, theta={theta}')
        if xrate_test >= xrate_train:
            break
        else:
            lam += lam_delta

    # compare regularized with unregularized
    # scatter the data, colored by class
    pos_idx = y == 1
    neg_idx = np.invert(pos_idx)
    plt.scatter(x[pos_idx, 0], x[pos_idx, 1], s=1, c='r', label='positive')
    plt.scatter(x[neg_idx, 0], x[neg_idx, 1], s=1, c='b', label='negative')
    # decision boundary: theta0 + theta1*x1 + theta2*x2 = 0  =>  solve for x2
    plt_line_x = np.array([x[:, 0].min(), x[:, 0].max()])
    theta_lam0 = thetas_arr[0]  # first run always has lam == 0
    plt_line_y_lam0 = -1 / theta_lam0[2] * (theta_lam0[0] + plt_line_x * theta_lam0[1])
    plt.plot(plt_line_x, plt_line_y_lam0, '--', label='border without regularization')
    theta_reg = thetas_arr[-1]  # last (accepted) lambda; equals thetas_arr[0] if lam never grew
    plt_line_y_reg = -1 / theta_reg[2] * (theta_reg[0] + plt_line_x * theta_reg[1])
    plt.plot(plt_line_x, plt_line_y_reg, label='border with regularization')

    plt.grid()
    plt.legend()
    plt.show()
