import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp

'''基于解析方法估计参数的LinearRegression'''
'''多元线性回归'''

# from sklearn.datasets import load_boston
# boston=load_boston()
# data=pd.DataFrame(boston.data,columns=boston.feature_names)
# target=pd.DataFrame(boston.target)
# X=np.matrix(data.values)
# y=np.matrix(target.values)
# y=y.T
# theta = np.matrix(np.array([0,0]))
# print(X.shape, theta.shape, y.shape)

# Font configuration: SimHei bold 16pt, so CJK labels render correctly.
font = {'family': 'SimHei',
        'weight': 'bold',
        'size': 16}
plt.rc('font', **font)
# Render the minus sign correctly on negative axis ticks.
plt.rc('axes', unicode_minus=False)

def real_func(x):
      """Ground-truth target curve: sin(2*pi*x)."""
      two_pi_x = 2 * np.pi * x
      return np.sin(two_pi_x)

def fit_func(p, x):
      """Evaluate the polynomial with coefficients p (highest power first) at x.

      Note: np.poly1d([1, 2, 3]) represents 1*x**2 + 2*x + 3.
      """
      poly = np.poly1d(p)
      return poly(x)

def residuals_func(p, x, y):
      """Residual vector fit_func(p, x) - y (for scipy.optimize.leastsq)."""
      return fit_func(p, x) - y

# Training abscissae (11 points) and a dense grid for smooth plot curves.
x = np.linspace(0, 1, 11)
x_points = np.linspace(0, 1, 1000)
# Targets: true curve values perturbed by per-point N(0, 0.1) noise.
y_ = real_func(x)
y = [np.random.normal(0, 0.1) + clean for clean in y_]

plt.title("")
plt.plot(x, y_, 'g-*')
plt.plot(x, y, 'bo')

#最小二乘法（least square method）
#AX=C
#x'=(A.TA)^(-1)*A.TC
def leastsq(n, x, y):
    """Closed-form ordinary least-squares polynomial fit.

    Solves the normal equations (A.T A) p = A.T C for the coefficient
    vector p of an (n-1)-degree polynomial, highest power first
    (directly usable by np.poly1d / fit_func).

    Parameters:
        n: number of coefficients (polynomial degree + 1).
        x: 1-D array of sample abscissae.
        y: sequence of sample ordinates.

    Returns:
        1-D array of n coefficients [p_{n-1}, ..., p_1, p_0].
    """
    C = np.asarray(y)
    # Vandermonde design matrix with columns [x^(n-1), ..., x, 1].
    # Fixes the previous column-stacking construction, which raised
    # an axis error for n < 3 (np.insert with axis=1 on a 1-D array).
    A = np.vander(np.asarray(x), n)
    # Solve the normal equations directly: np.linalg.solve is more
    # accurate and cheaper than forming the explicit inverse.
    return np.linalg.solve(A.T.dot(A), A.T.dot(C))

#岭回归（ridge regression）
#x'=(A.TA+laumda*I)^(-1)*A.TC
def ridge(n, x, y, laumda):
    """Closed-form ridge regression polynomial fit.

    Solves (A.T A + laumda * I) p = A.T C for the coefficient vector p
    of an (n-1)-degree polynomial, highest power first.

    Parameters:
        n: number of coefficients (polynomial degree + 1).
        x: 1-D array of sample abscissae.
        y: sequence of sample ordinates.
        laumda: L2 regularization strength (lambda; name kept for
            backward compatibility with existing callers).

    Returns:
        1-D array of n coefficients [p_{n-1}, ..., p_1, p_0].
    """
    C = np.asarray(y)
    # Vandermonde design matrix with columns [x^(n-1), ..., x, 1].
    # Fixes the previous construction, which raised an axis error for
    # n < 3 (np.insert with axis=1 on a 1-D array).
    A = np.vander(np.asarray(x), n)
    # Regularized normal equations; solve() avoids the explicit inverse.
    return np.linalg.solve(A.T.dot(A) + laumda * np.eye(n), A.T.dot(C))

def RSS(p, x, y):
    """Mean squared error of the fit.

    NOTE: despite the name, this returns MSE (mean of squared
    residuals), i.e. RSS / len(y), not the raw residual sum of squares.
    The name is kept unchanged for existing callers.
    """
    residuals = fit_func(p, x) - y
    return np.mean(residuals ** 2)

def VAR(p, x, y):
    """Unbiased (ddof=1) sample variance of x.

    p and y are unused; they are kept so the signature matches the
    companion statistics RSS and COV.
    """
    centered = x - x.mean()
    return np.sum(centered ** 2) / (x.shape[0] - 1)

def COV(p, x, y):
    """Unbiased (ddof=1) sample covariance between x and y.

    p is unused; it is kept so the signature matches the companion
    statistics RSS and VAR.

    Parameters:
        p: ignored.
        x: 1-D numpy array.
        y: sequence the same length as x.

    Returns:
        Scalar covariance sum((x-mean_x)*(y-mean_y)) / (len(x)-1).
    """
    mean_x = x.mean()
    mean_y = np.asarray(y).mean()
    # Debug prints removed: they wrote to stdout on every call.
    dev_y = np.asarray(y) - mean_y
    return (x - mean_x).dot(dev_y) / (x.shape[0] - 1)

def fitting(M=0):
    """Fit a degree-M polynomial to the global noisy samples (x, y).

    Plots the closed-form least-squares fit, a ridge fit, the true
    curve, and the noisy samples on the current matplotlib figure, and
    prints the fit statistics.

    Parameters:
        M: polynomial degree (number of coefficients is M + 1).

    Returns:
        Coefficient array from the closed-form least-squares fit.
    """
    # NOTE: an unused random p_init (leftover from a commented-out
    # scipy.optimize.leastsq experiment) was removed here.
    # Closed-form ordinary least squares via the normal equations.
    p_lsq1 = leastsq(M + 1, x, y)
    plt.plot(x_points, fit_func(p_lsq1, x_points), label='my lsq fitted curve')
    print("RSS:%.4f" % RSS(p_lsq1, x, y))
    print("VAR:%.4f" % VAR(p_lsq1, x, y))
    print("np.var:%.4f" % np.var(x, ddof=1))
    print("COV:%.4f" % COV(p_lsq1, x, y))
    # np.cov defaults to ddof=1; [0][1] is the cross-covariance entry.
    # Plain arrays replace the deprecated np.matrix wrappers.
    print("np.cov:%.4f" % np.cov(x, y)[0][1])

    # Ridge regression with a small regularization strength.
    rid = ridge(M + 1, x, y, 0.001)
    plt.plot(x_points, fit_func(rid, x_points), label='my ridge fitted curve')
    print(rid)

    # Visualization: ground-truth curve and the noisy training points.
    plt.plot(x_points, real_func(x_points), label='real')

    plt.plot(x, y, 'bo', label='noise')
    plt.legend()
    return p_lsq1

# Fit a degree-5 polynomial to the noisy samples and show the plot.
fitting(5)

plt.show()

'''单元线性回归'''
# from sklearn.linear_model import LinearRegression
# model=LinearRegression()
# model.fit(np.matrix(x).reshape(-1,1),y)
# test_x=np.array([[1]])
# predixt_y=model.predict(test_x)[0]
# print("w:%.2f,b:%.2f"%(model.coef_,model.intercept_))
# print("predict_y:%.2f real_y:%e"%(predixt_y,np.array(real_func(test_x))[0]))

# '''Loss损耗函数算法代码，theta.T转置，矩阵计算'''
# def MSE(X,y,theta):
#       l=len(y)
#       inner=np.power(((X*theta.T)-y),2)
#
# '''批量梯度下降算法
#  该函数通过执行梯度下降算法次数来更新theta值，每次迭代次数跟学习率有关
#    函数参数说明:
#    X :代表特征/输入变量
#    y:代表目标变量/输出变量
#    theta：线性回归模型的两个系数值（h(x)=theta(1)+theta(2)*x）
#    alpha：学习率
#    iters：迭代次数'''
# def gradientDescent(X,y,theta,alpha,iters):
#       tmp=np.matrix(np.zeros(theta.shape))
#       para=int(theta.ravel().shape[1])# ravel()方法将数组维度拉成一维数组,shape[1]输出列数
#       cost=np.zeros(iters)
#
#       for i in range(iters):
#             error=(X*theta.T)-y
#             for j in range(para):
#                   term = np.multiply(error, X[:,j])#数组和矩阵对应位置相乘，输出与相乘数组 / 矩阵的大小一致