import numpy as np
import matplotlib.pyplot as plt
import warnings
import sys
from sklearn.preprocessing import PolynomialFeatures
warnings.filterwarnings("ignore")
# --- Data preparation ---
# X0: raw one-dimensional feature values.
X0 = np.array([25, 28, 31, 35, 38, 40])
# x: design matrix — a column of ones (intercept term) prepended to X0.
x = np.column_stack((np.ones(len(X0)), X0))
# y: target values, one row per sample.
y = np.array([106, 145, 167, 208, 233, 258]).reshape(-1, 1)

print(X0)
print(x)
print(y)

# Initial model parameters (intercept, slope), both zero.
theta = np.zeros((2, 1))

# Cost function for linear regression (half mean squared error).
def cost(theta, X=None, Y=None):
    """Return J(theta) = 1/(2m) * sum((X @ theta - Y)**2).

    Parameters
    ----------
    theta : ndarray, shape (n, 1)
        Model parameters.
    X : ndarray, shape (m, n), optional
        Design matrix. Defaults to the module-level ``x`` so existing
        callers such as ``cost(theta)`` keep working unchanged.
    Y : ndarray, shape (m, 1), optional
        Target values. Defaults to the module-level ``y``.

    Returns
    -------
    float
        The scalar cost.
    """
    # Fall back to the module globals the original version hard-coded.
    if X is None:
        X = x
    if Y is None:
        Y = y
    m = Y.size
    y_hat = X.dot(theta)  # predictions, shape (m, 1)
    return 1.0 / (2 * m) * np.square(y_hat - Y).sum()

# Batch gradient descent (alternatives: stochastic / mini-batch GD).
def gradientDescent(x, y, theta, alpha=0.01, iters=1500):
    """Fit linear-regression parameters by batch gradient descent.

    ``theta`` is updated IN PLACE — callers read the fitted parameters
    from the array they passed in (the ``__main__`` driver relies on this).

    Parameters
    ----------
    x : ndarray, shape (m, n)
        Design matrix (first column of ones for the intercept).
    y : ndarray, shape (m, 1)
        Target values.
    theta : ndarray, shape (n, 1)
        Initial parameters; mutated in place.
    alpha : float
        Learning rate.
    iters : int
        Number of gradient steps.

    Returns
    -------
    list of float
        Cost J(theta) recorded after each update.
    """
    m = y.size
    costs = []
    for _ in range(iters):
        # Vectorized batch gradient: x.T @ (x@theta - y) equals the
        # original per-sample Python loop summing x_i^T * (pred_i - y_i).
        residual = x.dot(theta) - y          # shape (m, 1)
        gradient = x.T.dot(residual)         # shape (n, 1)
        theta -= alpha * gradient * (1.0 / m)  # in-place so caller sees it
        # Record the cost from the arrays actually passed in (the original
        # called cost(), which read module globals and ignored these args).
        costs.append(float(np.square(x.dot(theta) - y).sum() / (2 * m)))
    return costs




# Closed-form solution via the normal equations.
def fsolve(x, y):
    """Solve theta = (X^T X)^{-1} X^T y for least-squares regression.

    Parameters
    ----------
    x : array_like, shape (m, n) — design matrix.
    y : array_like, shape (m, 1) — target values.

    Returns
    -------
    ndarray, shape (n, 1)
        The least-squares parameters.

    Raises
    ------
    numpy.linalg.LinAlgError
        If ``X^T X`` is singular (e.g. collinear features).
    """
    X = np.asarray(x, dtype=float)
    Y = np.asarray(y, dtype=float)
    # np.linalg.solve on the normal equations replaces the deprecated
    # np.matrix API (np.mat / .I) and avoids forming an explicit inverse,
    # which is slower and less numerically stable.
    return np.linalg.solve(X.T.dot(X), X.T.dot(Y))

# Build degree-2 interaction features from the design matrix.
# NOTE(review): X_poly is computed but never used anywhere below, and since
# x already contains a constant column, the interaction terms mostly
# duplicate existing columns scaled — confirm whether this experiment is
# still needed or can be removed.
poly =PolynomialFeatures(degree=2,interaction_only=True,include_bias=False)
X_poly = poly.fit_transform(x)

if __name__ == "__main__":
    # Fit by gradient descent; gradientDescent mutates `theta` in place,
    # so the fitted parameters are read from `theta` afterwards.
    cost_history = gradientDescent(x, y, theta=theta, alpha=0.0009, iters=1085000)
    print("theta=", theta.ravel())
    print("cost=", cost(theta))