import numpy as np

# Build a synthetic linear-regression dataset: y = 4 + 3x + Gaussian noise.
np.random.seed(30)
X = np.random.rand(100, 1) * 2            # 100 samples, uniform on [0, 2)
Y = 4 + 3 * X + np.random.randn(100, 1)   # targets with unit-variance noise
# Prepend a bias column of ones so the first theta component is the intercept.
X_b = np.hstack((np.ones((100, 1)), X))
# print(f"X_b{X_b[96:104]}") # out-of-range slice bounds are clipped to the last element
"""
n = [1, 4, 5, 8, 24, 56, 25]
print(X_b[n])

>> [[1.         0.76149698]
 [1.         1.92521563]
 [1.         0.69332368]
 [1.         1.17138854]
 [1.         1.80502769]
 [1.         1.69062489]
 [1.         0.57287066]]
# Fancy indexing: selects the rows of X_b at the indices listed in n, as a new array.
"""


# Hyperparameters for mini-batch stochastic gradient descent.
n_epochs = 10000          # generously many epochs instead of a convergence test
m = 100                   # number of training samples
#learning_rate = 0.001    # fixed rate replaced by the decaying schedule below
batch_size = 5            # samples drawn per mini-batch
num_batches = m // batch_size
# Constants of the learning-rate decay schedule; treated as fixed.
t0 = 5
t1 = 500

# Random initialization of the parameter column vector (intercept, slope).
theta = np.random.randn(2, 1)

def learning_rate_schedule(t, t0=5, t1=500):
    """Decaying learning-rate schedule: t0 / (t + t1).

    Generalized from reading the module-level globals: t0 and t1 are now
    parameters whose defaults equal the module constants, so existing
    single-argument calls behave identically while other schedules can be
    expressed without touching globals.

    Args:
        t: current global step number (epoch * num_batches + batch index).
        t0: numerator constant setting the scale of the rate (default 5).
        t1: denominator offset that slows the early decay (default 500).

    Returns:
        The learning rate for step t, as a float.
    """
    return t0 / (t + t1)

# 4. Convergence: instead of testing a threshold, run a generously large
#    number of epochs so the decaying learning rate lets theta settle.
for epoch in range(n_epochs):
    # Reshuffle the sample order once per epoch, before iterating batches.
    arr = np.arange(m)
    np.random.shuffle(arr)  # in-place random permutation of the row indices
    X_b = X_b[arr]
    Y = Y[arr]
    # X_b and Y are permuted by the same index array so rows stay aligned.
    for i in range(num_batches):

        # 2. Pick one mini-batch and compute its cost gradient.
        # NOTE: batches are sampled WITH replacement (random_index, not i), so
        # within an epoch some batches may repeat and others be skipped, even
        # though the data was just shuffled.
        random_index = np.random.randint(num_batches)
        x_batch = X_b[random_index*batch_size:random_index*batch_size+batch_size]
        y_batch = Y[random_index*batch_size:random_index*batch_size+batch_size]
        # Unscaled squared-error gradient X^T (X theta - y) for the batch; no
        # 1/batch_size factor — the learning-rate schedule absorbs the scaling.
        gradient = x_batch.T.dot(x_batch.dot(theta)-y_batch)
        """
        单步计算梯度
        gradient = X_b.T.dot(X_b.dot(theta)-Y)  # gradient为2x1的列向量
        print(f"gradient0:{gradient}")
        """
        # 3. Gradient-descent update theta_{t+1} = theta_t - alpha * gradient,
        # with alpha decayed by the schedule as the global step count grows.
        learning_rate = learning_rate_schedule(epoch*num_batches+i)
        theta = theta-learning_rate*gradient

print(f"theta:{theta}")