import numpy as np


def sigmoid(h):
    """Element-wise logistic function, returned as a flat 1-D array.

    :param h: array-like of raw scores; flattened before evaluation.
    :return: 1-D ndarray of probabilities in (0, 1).

    Uses the numerically stable piecewise form: the naive
    ``1 / (1 + exp(-h))`` overflows ``exp`` for large negative ``h``,
    whereas ``exp(h) / (1 + exp(h))`` is safe there (and vice versa).
    """
    h = np.asarray(h, dtype=float).ravel()
    out = np.empty_like(h)
    pos = h >= 0
    # Non-negative scores: exp(-h) <= 1, no overflow possible.
    out[pos] = 1.0 / (1.0 + np.exp(-h[pos]))
    # Negative scores: exp(h) < 1, no overflow possible.
    eh = np.exp(h[~pos])
    out[~pos] = eh / (1.0 + eh)
    return out


def J_cost_function_logic_regression(X, y, theater=None):
    """Cross-entropy cost J(theta) for logistic regression.

    :param X: (m, n) design matrix.
    :param y: labels in {0, 1}; any shape with m elements (flattened).
    :param theater: (n, 1) parameter vector "theta"; defaults to zeros.
    :return: scalar cost -(1/m) * sum(y*log(h) + (1-y)*log(1-h)).

    Bug fix: the previous version did ``t1 = y; t1 *= np.log(h)`` —
    ``ravel()`` of a contiguous array is a *view*, so the in-place
    multiply corrupted the caller's label array on every call.  All
    arithmetic below allocates fresh arrays instead.
    """
    m = X.shape[0]
    n = X.shape[1]
    if theater is None:
        theater = np.zeros([n, 1])
    h = sigmoid(np.dot(X, theater).ravel())
    y = np.asarray(y).ravel()

    # Pure expressions only: never mutate y (or a view of it) in place.
    t = y * np.log(h) + (1.0 - y) * np.log(1.0 - h)
    J = (-1.0 / m) * np.sum(t)
    return J


def gradient_descent_algorithm_logic_regression(X, y, theater=None, alpha=0.001, num_iters=15000,
                                                with_history_and_score=True):
    """Batch gradient descent for logistic regression.

    :param X: (m, n) design matrix.
    :param y: labels in {0, 1}; reshaped to an (m, 1) column vector.
    :param theater: initial (n, 1) "theta"; defaults to zeros.
    :param alpha: learning rate.
    :param num_iters: maximum number of iterations.
    :param with_history_and_score: record J(theta) before each step.
    :return: tuple (theta, J_histories).
    """
    m = X.shape[0]
    n = X.shape[1]
    if theater is None:
        theater = np.zeros([n, 1])
    # Normalize y to a column vector: a 1-D y would make (h - y)
    # broadcast to an (m, m) matrix and silently corrupt the gradient.
    y = np.asarray(y).reshape(m, 1)

    J_histories = []

    for i in range(num_iters):
        if with_history_and_score:
            J_histories.append(J_cost_function_logic_regression(X, y, theater))
        # Hypothesis as an (m, 1) column; reshape (not in-place resize,
        # which raises on arrays that do not own their data).
        h = sigmoid(np.dot(X, theater).ravel()).reshape(m, 1)
        deltheater = (1.0 / m) * (X.T.dot(h - y))
        # Early exit once the gradient is numerically zero.
        if np.allclose(np.zeros([n, 1]), deltheater, rtol=1.e-2):
            return theater, J_histories
        theater = theater - alpha * deltheater
    return theater, J_histories


def testLogRegres(X, y, theta):
    """Score a logistic-regression model sample by sample.

    :param X: (n, f) test design matrix (n samples, f features).
    :param y: (n, 1) ground-truth labels.
    :param theta: (f, 1) learned parameter vector.
    :return: tuple (match count, accuracy, list of predicted probabilities).
    """
    sample_count, _feature_count = np.shape(X)
    correct = 0
    predictions = []
    for row in range(sample_count):
        # Predicted probability that this sample is in the positive class.
        prob = sigmoid(np.dot(X[row, :], theta))
        predictions.append(prob)
        # Threshold at 0.5 and compare the boolean label with the truth.
        if bool(np.where(prob >= .5, 1, 0)) == bool(y[row, 0]):
            correct += 1
    accuracy = correct / sample_count
    return correct, accuracy, predictions


def testLogRegres_my_better(X, y, theta):
    """Vectorised accuracy evaluation for a logistic-regression model.

    :param X: (m, n) design matrix, rows are [1, x1, x2, ..., xn].
    :param y: (m, 1) ground-truth labels in {0, 1}.
    :param theta: (n, 1) learned parameter vector.
    :return: tuple (match count, accuracy, predicted probabilities).
    """
    sample_count = X.shape[0]

    probabilities = sigmoid(X.dot(theta).ravel())

    # Samples predicted positive (p >= 0.5) count as matches when the
    # true label is also >= 0.5.
    positive = probabilities >= 0.5
    hits = np.sum(y[positive] >= 0.5)

    # Samples predicted negative match when the true label is < 0.5.
    negative = np.invert(positive)
    hits += np.sum(y[negative] < 0.5)

    accuracy = hits / sample_count
    return hits, accuracy, probabilities


def transform_theta_of_logic_regression(thetaOfLogic):
    """Rescale a logistic-regression weight vector by its last component.

    :param thetaOfLogic: (n, 1) parameter vector.
    :return: (n-1, 1) column vector of -theta[i] / theta[n-1] for i < n-1.
    """
    count = thetaOfLogic.shape[0]
    last = thetaOfLogic[-1].ravel()
    leading = thetaOfLogic[:-1].ravel()
    scaled = -leading / last
    # Return as a column vector (reshape instead of in-place resize).
    return scaled.reshape(count - 1, 1)
