import numpy as np


def h_model(X, theta):
    """Linear-regression hypothesis: predictions for every row of X.

    :param X: (m, n) feature matrix including the leading bias column of ones
    :param theta: (n, 1) parameter column vector
    :return: (m, 1) column vector of predicted values X @ theta
    """
    return np.dot(X, theta)


def J_cost_function(X, y, theta=None, lam=0):
    """Regularized squared-error cost for linear regression.

    J = (1 / (2m)) * (sum((X@theta - y)^2) + lam * sum(theta[1:]^2))

    :param X: (m, n) feature matrix including the bias column of ones
    :param y: (m, 1) target column vector
    :param theta: optional (n, 1) parameter vector; zeros when None
    :param lam: L2 regularization strength (theta0 / bias is excluded)
    :return: scalar cost value
    """
    m, n = X.shape
    if theta is None:
        theta = np.zeros([n, 1])
    error = X.dot(theta) - y
    theta_square_sum = 0.0
    if lam > 0:
        # theta0 (bias term) is conventionally excluded from regularization
        theta_square_sum = np.sum(theta[1:, :].ravel() ** 2)
    # BUG FIX: original wrote (1.0/2*m), which by operator precedence is
    # (0.5 * m) — multiplying by m instead of dividing by 2m.
    J = (1.0 / (2 * m)) * (error.T.dot(error).ravel()[0] + lam * theta_square_sum)
    return J


def gradient_descent_algorithm(X, y, theta=None, alpher=0.001, num_iters=15000, lam=0, with_history_and_score=True):
    """Batch gradient descent for (optionally L2-regularized) linear regression.

    :param X: (m, n) feature matrix including the bias column of ones
    :param y: (m, 1) target column vector
    :param theta: optional (n, 1) initial parameters; zeros when None.
                  The caller's array is never modified (a float copy is taken).
    :param alpher: learning rate (misspelled name kept for backward compatibility)
    :param num_iters: maximum number of iterations
    :param lam: L2 regularization strength (theta0 / bias excluded)
    :param with_history_and_score: when True, record the cost and R^2 score
                                   at the start of every iteration
    :return: (theta, J_histories, xscores)
    """
    m, n = X.shape
    if theta is None:
        theta = np.zeros([n, 1])
    else:
        # BUG FIX: work on a float copy so `theta -= ...` below does not
        # mutate the caller's array in place (and does not fail on int input).
        theta = theta.astype(float)

    J_histories = []
    xscores = []

    for _ in range(num_iters):
        if with_history_and_score:
            J_histories.append(J_cost_function(X, y, theta, lam=lam))
            xscores.append(P_score_function(X, y, theta))

        h = X.dot(theta)
        reg_term = np.zeros_like(theta)
        if lam > 0:
            # zero out the bias row so theta0 is not regularized
            reg_term = np.r_[[[0]], theta[1:, :]] * lam

        gradient = (1.0 / m) * (X.T.dot(h - y) + reg_term)

        # converged: gradient is numerically zero, stop early
        if np.allclose(np.zeros([n, 1]), gradient):
            return theta, J_histories, xscores

        theta -= alpher * gradient

    return theta, J_histories, xscores


def P_score_function(X, y, theta):
    """Coefficient of determination (R^2) of the model X @ theta against y.

    score = 1 - SS_res / SS_tot, where SS_res is the sum of squared
    residuals and SS_tot is the total sum of squares around the mean of y.

    :param X: (m, n) feature matrix including the bias column of ones
    :param y: (m, 1) target column vector
    :param theta: (n, 1) parameter column vector
    :return: scalar R^2 score (1.0 is a perfect fit)
    """
    residual = np.dot(X, theta) - y
    centered = y - np.mean(y)
    ss_res = residual.T.dot(residual)[0][0]
    ss_tot = centered.T.dot(centered)[0][0]
    return 1 - ss_res / ss_tot


def scale_feature_data(X1_n):
    """Standardize each column to zero mean and (near-)unit variance.

    :param X1_n: (m, n) matrix of columns x1, x2, ..., xn (or including y)
    :return: (scaled copy, mu row vector, sigma row vector); the input
             array is left untouched
    """
    # BUG FIX: copy AND cast to float — the original in-place -= / /= on a
    # same-dtype copy raises for integer input arrays.
    X_scaled = np.array(X1_n, dtype=float)
    mu = np.mean(X_scaled, axis=0)
    sigma = np.std(X_scaled, axis=0)
    X_scaled -= mu
    # epsilon keeps constant (zero-variance) columns from dividing by zero
    X_scaled /= sigma + 1e-8
    return X_scaled, mu, sigma


def scale_theta_back(X1_nOri, YOri, thetaScaled):
    """Convert theta fitted on standardized data back to the original scale.

    This function is inspired by below web pages:
    https://stackoverflow.com/questions/21168844/rescaling-after-feature-scaling-linear-regression/21170874#21170874
    https://www.zhihu.com/question/275019388 (In Chinese)

    :param X1_nOri: Features without x0, i.e. without the 1st ones column
    :param YOri: Target values
    :param thetaScaled: The theta vector after data is scaled.
    :return: thetaOri: The theta vector related to real data
    """
    print('theta:', thetaScaled)
    n = thetaScaled.shape[0] - 1

    # Re-derive the scaling statistics that were applied to the features.
    _, mu_x, sigma_x = scale_feature_data(X1_nOri)
    mu_x = mu_x.reshape(n, 1)
    sigma_x = sigma_x.reshape(n, 1)
    print('mu_x', mu_x.ravel())
    print('sigma_x', sigma_x.ravel())

    mu_y = YOri.mean()  # scalar
    sigma_y = YOri.std()  # scalar
    print('mu_y', mu_y)
    print('sigma_y', sigma_y)

    theta0_scaled = thetaScaled[0, 0]

    # Intercept: undo both the feature shift and the target scaling.
    weighted = thetaScaled[1:, 0].ravel() * mu_x.ravel() / sigma_x.ravel()
    xsum = np.sum(weighted)
    print('xsum', xsum)
    print('theta0Scaled', theta0_scaled)
    theta0_ori = (theta0_scaled - xsum) * sigma_y + mu_y

    # Slopes: rescale each coefficient by sigma_y / sigma_x_i.
    slopes = (thetaScaled[1:].ravel() * sigma_y / sigma_x.ravel()).reshape(n, 1)

    return np.r_[[[theta0_ori]], slopes]


def regular_equation_algorithm(X, y):
    """Closed-form least-squares fit via the normal equation.

    Solves (X^T X) theta = X^T y directly.

    :param X: (m, n) feature matrix including the bias column of ones
    :param y: (m, 1) target column vector
    :return: (n, 1) parameter column vector theta
    :raises numpy.linalg.LinAlgError: if X^T X is singular
    """
    XT = X.T
    # np.linalg.solve is more numerically stable (and cheaper) than forming
    # the explicit inverse with np.linalg.inv; also dropped the unused `m`.
    theta = np.linalg.solve(XT.dot(X), XT.dot(y))
    return theta
