import math
import matplotlib.pyplot as plt
import numpy as np


def generate_data(sample_num: int, u: float, sigma: float, start: float = 0.0, end: float = 1.0):
    """
    Generate sample_num training points on [start, end): x is evenly spaced and
    y = sin(2*pi*x) plus Gaussian noise with mean u and standard deviation sigma.

    :param sample_num: number of samples
    :param u: noise mean
    :param sigma: noise standard deviation
    :param start: start coordinate (inclusive)
    :param end: end coordinate (exclusive)
    :return: x_train, y_train (both 1-D arrays of length sample_num)
    """
    # np.linspace with endpoint=False always yields exactly sample_num points;
    # the previous np.arange(start, end, step) form could overshoot by one
    # element due to float rounding and then fail to broadcast with the noise.
    x_train = np.linspace(start, end, sample_num, endpoint=False)
    gauss_noise = np.random.normal(u, sigma, sample_num)
    y_train = np.sin(x_train * 2 * math.pi) + gauss_noise
    return x_train, y_train


def draw_picture(x_train, y_train, w, start: float = 0.0, end: float = 1.0, title: str = 'picture'):
    """
    Plot the true sine curve, the noisy training samples and the fitted
    polynomial on one figure, then show it.

    :param x_train: 1-D vector of sample x coordinates
    :param y_train: 1-D vector, same length as x_train
    :param w: polynomial weights, lowest degree first (w[0] is the constant term)
    :param start: left edge of the plotted x range
    :param end: right edge of the plotted x range
    :param title: figure title
    :return: None
    """
    plt.axis([start, end, -2, 2])
    x = np.arange(start, end, 0.01)
    # np.poly1d expects the highest-degree coefficient first, so reverse w.
    f = np.poly1d(w[::-1])
    print('<<<多项式f(x)如下>>>')
    print(f)
    # NOTE(review): plt.legend with a plain label list attaches labels in
    # artist creation order, so 'y_train' labels the sine curve below and
    # 'y_prediction' labels the scatter — confirm that mapping is intended.
    plt.plot(x, np.sin(x * 2 * math.pi))
    plt.scatter(x_train, y_train, c='red', marker='v')
    plt.plot(x, f(x))
    plt.title(title)
    plt.ylabel('y')
    plt.xlabel('x')
    plt.legend(['y_train', 'y_prediction'], loc='upper right')
    plt.show()


def analytical_solution(x_train, y_train, degree: int, l: float = 0.0):
    """
    Closed-form (normal-equation) solution of the ridge-regularized
    least-squares polynomial fit: (X^T X + l*I) w = X^T Y.

    :param x_train: 1-D vector of sample x coordinates
    :param y_train: 1-D vector, same length as x_train
    :param degree: polynomial degree
    :param l: L2 penalty coefficient
    :return: polynomial weights as a 1-D array; w[0] is the constant term
    """
    X, Y = generateXandY(x_train, y_train, degree)
    # Solve the linear system directly instead of forming the explicit
    # inverse: np.linalg.solve is both faster and numerically more stable
    # than inv() followed by a matrix product.
    w = np.linalg.solve(X.T * X + np.eye(degree + 1) * l, X.T * Y)
    return np.asarray(w).ravel()


def gradient_descent(x_train, y_train, degree: int, learning_rate: float, epochs: int, l: float = 0.0):
    """
    Fit polynomial weights by batch gradient descent on the ridge-regularized
    squared-error loss.

    :param x_train: 1-D vector of sample x coordinates
    :param y_train: 1-D vector, same length as x_train
    :param degree: polynomial degree
    :param learning_rate: step size for each gradient update
    :param epochs: maximum number of iterations
    :param l: L2 penalty coefficient
    :return: polynomial weights as a 1-D array; w[0] is the constant term
    """
    X, Y = generateXandY(x_train, y_train, degree)
    w = np.ones([degree + 1, 1])
    # Fixes vs the previous version: `epochs` is now honored (the old
    # unconditional `while` loop could spin forever if the step size made the
    # iteration diverge), the per-step debug print is removed, and the
    # 1x1-matrix gradient norm is reduced to a scalar before comparison.
    for _ in range(epochs):
        gradient = X.T * X * w - X.T * Y + l * w
        # Stop early once the squared gradient norm is negligible.
        if float(gradient.T * gradient) < 1e-10:
            break
        w = w - learning_rate * gradient
    return np.asarray(w).ravel()


def conjugate_gradient(x_train, y_train, degree: int, learning_rate: float, epochs: int, epsilon: float = 1e-5,
                       l: float = 0.0):
    """
    Fit polynomial weights with the conjugate-gradient method applied to the
    normal equations Q w = b, where Q = X^T X + l*I and b = X^T Y.

    :param x_train: 1-D vector of sample x coordinates
    :param y_train: 1-D vector, same length as x_train
    :param degree: polynomial degree
    :param learning_rate: unused; kept for interface compatibility with the
        other solvers
    :param epochs: maximum number of iterations
    :param epsilon: stop once the squared residual norm drops to this value
    :param l: L2 penalty coefficient
    :return: polynomial weights as a 1-D array; w[0] is the constant term
    """
    X, Y = generateXandY(x_train, y_train, degree)
    Q = X.T * X + np.eye(degree + 1) * l
    b = X.T * Y
    w = np.ones([degree + 1, 1])
    r = b - Q * w  # residual
    p = r.copy()   # search direction
    # Fixes vs the previous version: iterations are capped by `epochs` (the
    # old loop had no bound and could spin on float residuals), the beta
    # scalar no longer shadows the right-hand-side vector `b`, and 1x1
    # matrix products are reduced to Python floats before use.
    for _ in range(epochs):
        rs_old = float(r.T * r)
        if rs_old <= epsilon:
            break
        alpha = rs_old / float(p.T * Q * p)
        w = w + alpha * p
        r = r - alpha * (Q * p)
        beta = float(r.T * r) / rs_old
        p = r + beta * p
    return np.asarray(w).ravel()


def generateXandY(x_train, y_train, degree):
    """
    Build the design matrix X and target column Y for a polynomial fit.

    :param x_train: 1-D vector of sample x coordinates
    :param y_train: 1-D vector, same length as x_train
    :param degree: polynomial degree
    :return: X, Y; X is an n x (degree+1) matrix whose column j holds
        x_train**j, and Y is an n x 1 column matrix
    """
    # A Vandermonde matrix with increasing powers is exactly the transposed
    # [ [1...], [x...], [x^2...], ... ] layout built by hand before.
    X = np.mat(np.vander(np.asarray(x_train), degree + 1, increasing=True))
    Y = np.mat(y_train).T
    return X, Y


if __name__ == '__main__':
    # --- experiment parameters ---
    u = 0.0          # noise mean
    sigma = 0.1      # noise standard deviation

    sample_num = 10  # number of training samples
    degree = 10      # polynomial degree (degree + 1 weights)
    l = 0            # L2 penalty coefficient; l = 0 disables regularization,
                     # which also covers the old "without_punish" variants

    learning_rate = 4e-1
    epochs = 10000

    x_train, y_train = generate_data(sample_num, u, sigma)

    # The escape sequence is "\033[0;34;40m" (blue on black); the previous
    # "40-m" spelling was a malformed ANSI code.
    print("\033[0;34;40m---analytical_solution_with_punish---\033[0m")
    w = analytical_solution(x_train, y_train, degree, l=l)
    draw_picture(x_train, y_train, w, title='analytical_solution_with_punish l={}'.format(l))

    print("\033[0;34;40m---gradient_descent_with_punish---\033[0m")
    w = gradient_descent(x_train, y_train, degree, learning_rate, epochs, l=l)
    draw_picture(x_train, y_train, w, title='gradient_descent_with_punish l={}'.format(l))

    print("\033[0;34;40m---conjugate_gradient_with_punish---\033[0m")
    w = conjugate_gradient(x_train, y_train, degree, learning_rate, epochs, l=l)
    draw_picture(x_train, y_train, w, title='conjugate_gradient_with_punish l={}'.format(l))
