import numpy as np
import matplotlib.pyplot as plt

X=np.array([np.ones(100),np.random.rand(100)])  # design matrix, shape (2, 100): first ROW is all ones (bias term), second row is 100 uniform [0, 1) samples
print(X)
y=np.dot([4,3],X)#+np.random.rand(100)  # targets: y = 4*1 + 3*x, i.e. true parameters are [4, 3]; optional noise term left commented out
print(y)
plt.scatter(X[1,:],y) # scatter plot of the feature row X[1, :] against y

num_iters=5

def gradient_descent(theta, X, y, num_iters):
    """Iteratively refit ``theta`` for the linear model ``y ~ theta @ X``.

    NOTE(review): despite the name, the update below is the full
    Newton / normal-equation step — the residual gradient is multiplied
    by the inverse of the Hessian ``X @ X.T`` with no learning rate —
    so ``theta`` lands on the exact least-squares solution after the
    very first iteration. Kept as-is to preserve the original behavior.

    Parameters
    ----------
    theta : ndarray, shape (k,)
        Initial parameter vector.
    X : ndarray, shape (k, m)
        Design matrix; in this script row 0 is the all-ones bias row.
    y : ndarray, shape (m,)
        Target values.
    num_iters : int
        Number of update iterations to run.

    Returns
    -------
    (theta, theta_history, loss_history)
        Final parameters, per-iteration parameters, per-iteration loss.
    """
    loss_history = np.zeros(num_iters)
    # Width follows len(theta) instead of a hard-coded 2 so the function
    # also works for models with more than two parameters.
    theta_history = np.zeros((num_iters, len(theta)))

    m = len(y)

    # X @ X.T is constant across iterations — compute it once outside the loop.
    gram = np.dot(X, X.T)

    for i in range(num_iters):
        # Residual: predicted output minus true output.
        residual = np.dot(theta, X) - y
        # Solve gram @ delta = X @ residual instead of forming the explicit
        # inverse: mathematically identical (gram is symmetric), numerically
        # better conditioned than np.linalg.inv.
        theta = theta - np.linalg.solve(gram, X.dot(residual))
        # Mean squared error (halved, the usual least-squares convention).
        loss = 1 / (2 * m) * np.sum(np.square(np.dot(theta, X) - y))

        if i % 100 == 0:
            print("Iterating:{}".format(i))
            print("theta={}".format(theta))
            print("loss={}\n".format(loss))

        theta_history[i, :] = theta
        loss_history[i] = loss
    return theta, theta_history, loss_history

def newton_method(theta, X, y):
    """Solve the linear least-squares problem in a single Newton step.

    For a linear model the Newton step from ANY starting point lands
    directly on the normal-equation solution, so ``theta`` never
    influences the result; the parameter is kept only for interface
    compatibility with ``gradient_descent``.

    Solves ``(X @ X.T) @ theta = X @ y`` with ``np.linalg.solve`` rather
    than forming the explicit inverse — same result (the Gram matrix is
    symmetric), better numerical conditioning.

    Returns
    -------
    ndarray, shape (k,)
        The least-squares parameter vector.
    """
    theta_newton = np.linalg.solve(np.dot(X, X.T), X.dot(y))
    return theta_newton

theta_init=np.random.randn(2)  # random starting point for the two parameters (bias, slope)
theta,theta_history,loss_history=gradient_descent(theta_init,X,y,num_iters)
print(theta)
plt.plot(loss_history)  # loss per iteration
plt.plot(theta_history)  # parameter trajectories (one line per theta component)