import time
import numpy as np, sklearn.preprocessing as pp, matplotlib.pyplot as plt


def sigmoid(z):
    """
    Logistic (sigmoid) activation: maps any real value into (0, 1).

    :param z: scalar or <ndarray>
    :return:  1 / (1 + e^-z), elementwise for arrays
    """
    return 1.0 / (1.0 + np.exp(-z))


def model(X_X2, theta):
    """
    Hypothesis function for logistic regression.

    :param X_X2:  <ndarray>-like design matrix [X, X^2, ...]
    :param theta: <ndarray> parameter row vector; transposed to a column
    :return:      sigmoid(X_X2 . theta^T), the predicted probabilities
    """
    linear_part = np.dot(X_X2, theta.T)  # one score per sample
    return sigmoid(linear_part)


def cost(X_X2, y, theta):
    """
    Negative average log-likelihood (cross-entropy) cost.

    :param X_X2:  design matrix
    :param y:     labels in {0, 1}
    :param theta: <ndarray> parameter row vector
    :return: mean of -[y*log(h) + (1-y)*log(1-h)] over the samples.
             NOTE: with a DataFrame y the result is a <Series> wrapping an
             ndarray (callers use .values); with plain ndarrays it is a scalar.
    """
    # Hoisted: the original computed model(X_X2, theta) twice per call.
    h = model(X_X2, theta)
    left = np.multiply(-y, np.log(h))
    right = np.multiply(y - 1, np.log(1 - h))

    # left and right are elementwise products over the samples; average them
    return np.sum(left + right) / len(X_X2)


def gradient(X_X2, y, theta):
    """
    Gradient of the negative log-likelihood w.r.t. theta.

    :param X_X2:  <DataFrame> or <ndarray> design matrix, shape (m, n)
    :param y:     <DataFrame> labels, shape (m, 1)
    :param theta: <ndarray> parameter row vector, shape (1, n)
    :return:      <ndarray> gradient, same shape as theta

    Improvement: the per-parameter loop
        grad[0, j] = sum_i error_i * X[i, j]
    is a single matrix-vector product, error @ X. np.asarray also lets this
    accept a plain ndarray, where the old .iloc access required a DataFrame.
    """
    # error_i = h(x_i) - y_i, flattened to a 1-D vector of length m
    error = (model(X_X2, theta) - y).values.flatten()

    grad = np.zeros(theta.shape)
    grad[0, :] = np.dot(error, np.asarray(X_X2))

    return grad / len(X_X2)


def gradient_1(X_X2, y, theta):
    """
    Gradient of the squared-error cost for logistic regression:
        grad_j = (1/m) * sum_i (g_i - y_i) * g_i * (1 - g_i) * x_ij
    Parameters as in gradient().

    Improvement: [g-y]*g*(1-g) does not depend on j, but the original
    recomputed it inside the per-parameter loop; it is hoisted here and the
    loop itself is replaced by one matrix-vector product.
    """
    g = model(X_X2, theta)               # g(x), shape (m, 1)
    error = (g - y).values.flatten()     # g(x) - y
    g_flat = g.flatten()

    # Loop-invariant per-sample weight: [g(x) - y] * g * (1 - g)
    weight = error * g_flat * (1.0 - g_flat)

    grad = np.zeros(theta.shape)
    grad[0, :] = np.dot(weight, np.asarray(X_X2))

    return grad / len(X_X2)


STOP_ITER = 0  # stop after a fixed number of iterations
STOP_COST = 1  # stop when the cost change between iterations is tiny
STOP_GRAD = 2  # stop when the gradient norm is tiny


def stopCriterion(stop_type, value, threshold):
    """
    Decide whether gradient descent should stop, under one of three policies:
        STOP_ITER: the iteration count exceeded the threshold
        STOP_COST: the last two recorded costs differ by less than the threshold
        STOP_GRAD: the norm of the current gradient fell below the threshold
    """
    if stop_type == STOP_ITER:
        return value > threshold
    if stop_type == STOP_COST:
        return abs(value[-1] - value[-2]) < threshold
    if stop_type == STOP_GRAD:
        return np.linalg.norm(value) < threshold


def shuffleData(data):
    """
    Shuffle the sample order (the raw data may be ordered) and split into
    features and labels.

    :param data: <DataFrame>; the last column is the label, the rest features
    :return: (X_X2, y) — feature and label DataFrames with rows still aligned

    Bug fix: the old code called np.random.shuffle(data.values). When
    .values returns a copy (e.g. for mixed-dtype frames), that shuffles the
    copy and silently leaves the DataFrame untouched. Shuffling positional
    indices and selecting rows via .iloc works for every dtype layout.
    """
    order = np.random.permutation(len(data))
    shuffled = data.iloc[order].reset_index(drop=True)

    cols = shuffled.shape[1]  # [1] is the column count
    X_X2 = shuffled.iloc[:, 0:cols - 1]  # first 'cols - 1' columns
    y = shuffled.iloc[:, cols - 1:]      # last column

    return X_X2, y


def descent(data, theta, batch_size, stop_type, thresh, alpha, T):
    """
    Minimize the chosen cost (maximize the likelihood) by gradient descent.

    :param data:        <DataFrame>; last column is the label
    :param theta:       <ndarray> initial parameters, shape (1, n)
    :param batch_size:  1 = stochastic descent, #rows = full batch,
                        anything in between = mini-batch
    :param stop_type:   STOP_ITER / STOP_COST / STOP_GRAD (0, 1, 2)
    :param thresh:      threshold paired with stop_type
    :param alpha:       learning rate
    :param T:           0: likelihood gradient, 1: squared-error gradient
    :return: (theta, iteration count, cost history, last gradient, seconds)
    """
    init_time = time.time()
    i = 0  # iteration counter
    k = 0  # start row of the current batch
    rows = data.shape[0]
    X_X2, y = shuffleData(data)
    grad = np.zeros(theta.shape)

    # cost() returns a <Series> here (DataFrame inputs); keep its ndarray
    costs = [cost(X_X2, y, theta).values]

    # The original duplicated the whole loop for T==0 / T==1; the two copies
    # differed only in which gradient function they called, so select it once.
    if T == 0:
        grad_fn = gradient
    elif T == 1:
        grad_fn = gradient_1
    else:
        # Any other T skipped both loops in the original; keep that behavior.
        return theta, i - 1, costs, grad, (time.time() - init_time)

    while True:
        grad = grad_fn(X_X2[k:k + batch_size], y[k:k + batch_size], theta)
        # Reassign rather than '-=' so the caller's theta array is never
        # mutated in place (the old T==1 branch used 'theta -= ...').
        theta = theta - alpha * grad
        k += batch_size  # advance to the next batch
        if k >= rows:
            k = 0
            X_X2, y = shuffleData(data)  # reshuffle for the next epoch

        costs.append(cost(X_X2, y, theta).values)
        i += 1

        # Pick the quantity the chosen stop criterion inspects
        if stop_type == STOP_ITER:
            value = i
        elif stop_type == STOP_COST:
            value = costs
        else:
            value = grad

        if stopCriterion(stop_type, value, thresh):
            break

    return theta, i - 1, costs, grad, (time.time() - init_time)


# 执行函数
def runExp(data, theta, batch_size, stop_type, thresh, alpha, alpha_1):
    # data和theta会被更新,所以深复制
    data_1 = data.copy()
    theta_1 = theta.copy()
    costs_1 = [0.69]

    # 似然函数
    theta, ite, costs, grad, dur_time = descent(data, theta, batch_size, stop_type, thresh, alpha, 0)
    # 误差函数
    # theta_1, ite_1, costs_1, grad_1, dur_time_1 = descent(data_1, theta_1, batch_size, stop_type, thresh, alpha_1, 1)

    # region name
    name = 'original' if (data.iloc[:, 1] > 2).sum() > 1 else 'scaled'
    name += 'Data \n- learning rate: {} - '.format(alpha)  # 给定的学习速率
    if batch_size == data.shape[0]:
        str_desc_type = '完全梯度下降 Gradient'  # 完全梯度下降
    elif batch_size == 0:
        str_desc_type = '随机梯度下降 Stochastic'  # 随机梯度下降
    else:
        str_desc_type = '批量梯度下降 Mini-Batch ({})'.format(batch_size)  # 批量梯度下降
    name += str_desc_type + 'Descent \n- stop: '

    if stop_type == STOP_ITER:
        str_stop = '{} iterations'.format(thresh)  # 迭代次数满足
    elif stop_type == STOP_COST:
        str_stop = '\tcosts change < {}'.format(thresh)  # 损失满足
    else:
        str_stop = 'Current grad {}, gradient norm < {}'.format(grad, thresh)  # 梯度下降满足
    name += str_stop
    # endregion

    # costs[-1]是<Series>类型,是<DataFrame>中的一行或一列,要取出来
    print("***\n{}\n- theta: \t{} \n- theta_1: \t{} \n- iter: \t{} \n- last_cost: \t{:6.5f} \n- duration: \t{:6.5f}s\n***".
          format(name, theta, theta_1, ite, costs[-1][0], dur_time))

    fig, ax = plt.subplots(figsize=(8, 8))
    ax.plot(np.arange(len(costs)), costs, 'r', label='Likelihood function')
    # ax.plot(np.arange(len(costs_1)), costs_1, 'b', label='error function')

    ax.legend()
    ax.set_xlabel('Iteration')
    ax.set_ylabel('Cost')
    ax.set_title(name.upper() + ' - Error vs. Iteration')

    plt.rcParams['font.sans-serif'] = ['MicroSoft Yahei']  # 显示中文标签
    fig.show()

    return theta, theta_1


# 精度,分界线默认0.5:
# 标准化后的数据集,对称轴为0,而sigmoid()横轴为0处值为0.5,所以大于0.5即在正半轴
def accuracy(X_X2, y, theta, cor_sta=0.5):
    predictions = [1 if ans >= cor_sta else 0 for ans in model(X_X2, theta)]
    correct = [1 if (a == b) else 0 for (a, b) in zip(y, predictions)]  # imp 2.

    return sum(correct) / len(correct)


# 给定testData,进行预测
def custom_predict(testData, pdData, theta, cor_sta=0.5):
    testData.insert(0, 'X0', 1)
    testData.insert(3, 'X3', testData['exam1'].values * testData['exam1'].values)
    testData.insert(4, 'X4', testData['exam2'].values * testData['exam2'].values)
    testData_s = testData.copy()
    n = pdData.shape[1]

    ss = pp.StandardScaler()
    ss.fit(pdData.iloc[:, 1:n - 1])  # fit()拟合,获取dataSet的特征
    testData_s.iloc[:, 1:n - 1] = ss.transform(testData.iloc[:, 1:n - 1])  # * 100 # A 3

    predictions = [1 if ans >= cor_sta else 0 for ans in model(testData_s, theta)]
    return predictions
