# 问题1：优化随机和小批量取数时有可能有些样本永远取不到的问题
# 解决方法，每次取数前将样本顺序打乱

# 问题2：如何可以通过代码让学习率随着迭代次数增多而逐渐变小？
# 解决办法有很多：
# 1.每次迭代按固定比例减小学习率 2.学习率按指数函数衰减 3.学习率与迭代次数成反比
# 4.在特定迭代次数时手动调整学习率 5.学习率按余弦函数周期性衰减（适合更复杂的优化）
# 下面以学习率与迭代次数成反比为例

import numpy as np
import matplotlib.pyplot as plt

# Synthetic data for the regression: true model y = 4 + 3x + noise.
# NOTE(review): np.random.rand draws uniform noise on [0, 1), mean 0.5, so the
# fitted intercept will land near 4.5 rather than 4 — presumably zero-mean
# np.random.randn was intended; confirm before relying on the recovered theta.
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.rand(100, 1)
# Prepend a bias column of ones so theta[0] plays the role of the intercept.
X_c = np.c_[np.ones((100, 1)), X]

# Hyperparameters
m = 1000  # number of epochs (outer iterations of the training loop)
n = 100  # number of samples
batch_size = 10
num_batches = int(n / batch_size)  # batches per epoch


# Learning-rate schedule (inverse-time decay)
def learning_schedule(t, i, d):
    """Return the initial rate ``t`` shrunk by the factor 1 / (1 + d * i).

    NOTE: the call site passes (initial_rate, decay_rate, epoch_index), so
    within this function ``i`` is the decay coefficient and ``d`` is the
    iteration count — the names are swapped relative to their roles, but the
    resulting value t / (1 + epoch * decay_rate) is the intended schedule.
    """
    decay_factor = 1 + d * i
    return t / decay_factor


# Initial learning rate
initial_learning_rate = 0.1
decay_rate = 0.01  # decay coefficient for the inverse-time schedule
theta = np.random.rand(2, 1)  # random init of [intercept, slope]

loss_history = []  # full-data MSE recorded once per epoch, plotted below

for d in range(m):
    # Inverse-time decay: the step size shrinks as training progresses.
    learning_rate = learning_schedule(initial_learning_rate, decay_rate, d)
    # Shuffle ONCE per epoch, before slicing batches. The original shuffled
    # inside the batch loop, so every batch was cut from a freshly reshuffled
    # array — samples could repeat across batches while others were never
    # drawn, which is exactly the problem the file header says to avoid.
    arr = np.arange(len(X))
    np.random.shuffle(arr)
    X_c = X_c[arr]
    y = y[arr]
    for i in range(num_batches):
        X_batch = X_c[i * batch_size:i * batch_size + batch_size]
        y_batch = y[i * batch_size:i * batch_size + batch_size]
        # MSE gradient: (2 / b) * X^T (X theta - y). The 2/batch_size factor
        # was missing in the original; without it the effective step is
        # ~batch_size times too large and the iteration can diverge.
        gradients = (2 / batch_size) * X_batch.T.dot(X_batch.dot(theta) - y_batch)
        theta = theta - learning_rate * gradients
    # Track full-data MSE at the end of each epoch.
    loss = np.mean((X_c.dot(theta) - y) ** 2)
    loss_history.append(loss)
print(theta)
# Visualize the per-epoch training loss curve.
plt.plot(loss_history)
axes = plt.gca()
axes.set_xlabel("Epoch")
axes.set_ylabel("Loss")
axes.set_title("Training Loss")
plt.show()
