#1.利用解析解计算线性回归方程的系数
# a = inv((x_T*x))*x_T*z
# x:相关的变量 s*n s为行数，n为列数
# z:因变量 s*1
# a:系数 n*1

import numpy as np
import math

def linear_regression(x, z):
    """Fit linear-regression coefficients via the normal equations.

    Solves (x.T @ x) a = x.T @ z, i.e. the closed-form least-squares
    solution a = inv(x.T x) x.T z, but using ``np.linalg.solve`` instead
    of an explicit inverse for better numerical stability.

    Parameters
    ----------
    x : ndarray, shape (s, n)
        Independent variables, one sample per row.
    z : ndarray, shape (s, 1)
        Dependent variable.

    Returns
    -------
    ndarray or None
        Coefficient column vector, or ``None`` (with a message printed)
        when the row counts of ``x`` and ``z`` disagree.

    Notes
    -----
    When ``x`` has exactly one column, an intercept column of ones is
    prepended (result shape (2, 1)); for multi-column ``x`` the caller
    must supply its own intercept column. This asymmetry is preserved
    for backward compatibility.
    """
    x_rows, x_cols = x.shape
    z_rows, z_cols = z.shape
    if x_rows != z_rows:
        print("自变量和因变量的行数不一致")
        return None
    if x_cols == 1:
        # Prepend a bias (intercept) column of ones.
        x = np.hstack((np.ones(x_rows).reshape(x_rows, 1), x))
    # Solve the normal equations directly; avoids forming inv(x.T x).
    a = np.linalg.solve(x.T @ x, x.T @ z)
    return a

#2. Solve linear-regression coefficients by gradient descent
def linear_regression_gd(x, z, alpha, max_iter, tol):
    """Fit linear-regression coefficients by batch gradient descent.

    Minimises f(a) = ||z - X a||^2 (expanded as z'z - 2 z'Xa + a'X'Xa),
    where X is ``x`` with a prepended column of ones (intercept).
    Iterates until the change in the objective falls below ``tol`` or
    ``max_iter`` iterations are exceeded.

    Parameters
    ----------
    x : ndarray, shape (s, n)
        Independent variables, one sample per row (any number of columns).
    z : ndarray, shape (s, 1)
        Dependent variable.
    alpha : float
        Learning rate (scalar step size).
    max_iter : int
        Maximum number of iterations.
    tol : float
        Convergence threshold on the objective-value change.

    Returns
    -------
    ndarray, shape (n + 1, 1)
        Coefficient column vector, intercept first.
    """
    x_rows, x_cols = x.shape
    # Prepend a bias (intercept) column of ones.
    x = np.hstack((np.ones(x_rows).reshape(x_rows, 1), x))
    x_T = x.T
    z_T = z.T
    # Initial guess sized to the number of columns, so any feature
    # count works (the original hard-coded a (2, 1) start vector).
    a = np.ones((x_cols + 1, 1))
    # Objective at the current a; carried across iterations so it is
    # computed only once per step.
    f_prev = z_T @ z - 2 * z_T @ x @ a + a.T @ x_T @ x @ a
    diff = math.inf
    i = 0  # iteration counter
    while diff > tol:
        # Gradient of f is 2 X'(Xa - z); the constant 2 is absorbed
        # into alpha. NOTE: must be scalar '*', not '@' — matmul with
        # a scalar learning rate raises ValueError.
        a = a - alpha * (x_T @ (x @ a - z))
        f_cur = z_T @ z - 2 * z_T @ x @ a + a.T @ x_T @ x @ a
        diff = np.linalg.norm(f_cur - f_prev)
        f_prev = f_cur
        i = i + 1
        if i > max_iter:
            print("迭代次数：", i)
            break
    return a
  
