import numpy as np
import matplotlib.pyplot as plt
'''
X:
1,1,1,...,1
0.16098067,0.61170377,0.67705979,...,0.54568303
'''
# z=[[[1,2],[3,4]],[[1,2],[3,4]]]
# nz=np.array(z)
# print(nz.ndim)

X=np.array([np.ones(100),np.random.rand(100)])  # 2x100 design matrix: first ROW is all ones (bias term), second row is 100 uniform random values in [0, 1)
print(X)
y=np.dot([4,3],X)#+np.random.rand(100)  # true parameters are [4, 3]; uncomment the noise term to make the fit non-exact
print(y)
plt.scatter(X[1,:],y) # scatter plot: second row of X (the feature) against y

alpha=0.01  # learning rate
num_iters=1000   # number of gradient-descent iterations

def gradient_descent(theta, X, y, alpha, num_iters):
    """Fit a linear model y ≈ theta @ X by batch gradient descent.

    :param theta: initial parameter vector, shape (2,)
    :param X: design matrix, shape (2, m) — row 0 is the bias row of ones
    :param y: target values, shape (m,)
    :param alpha: learning rate
    :param num_iters: number of iterations to run
    :return: (theta, theta_history, loss_history) where theta is the final
        parameter vector, theta_history[i] is theta at the START of
        iteration i, and loss_history[i] is the loss of that same theta
        (the two histories are aligned index-by-index).
    """
    loss_history = np.zeros(num_iters)        # loss per iteration
    theta_history = np.zeros((num_iters, 2))  # theta per iteration
    m = len(y)  # number of data points

    for i in range(num_iters):
        y_pred = np.dot(theta, X)  # predictions under the current theta
        loss = np.sum(np.square(y_pred - y)) / (2 * m)

        # Record the current theta together with its own loss BEFORE the
        # update. (The original stored the post-update theta next to the
        # pre-update loss, misaligning the two histories by one step.)
        theta_history[i, :] = theta
        loss_history[i] = loss

        if i % 100 == 0:
            print("Iterating:{}".format(i))
            print("theta={}".format(theta))
            print("loss={}\n".format(loss))

        # Batch gradient step: grad = (1/m) * (y_pred - y) @ X.T
        theta = theta - alpha / m * np.dot(y_pred - y, X.T)

    return theta, theta_history, loss_history

theta_init = np.random.randn(2)  # random starting point for the two parameters
theta, theta_history, loss_history = gradient_descent(
    theta_init, X, y, alpha, num_iters
)
print(theta)
# Visualize training: the loss curve and the trajectory of both parameters.
plt.plot(loss_history)
plt.plot(theta_history)