import numpy as np
from matplotlib import pyplot as plt
from tensorboardX import SummaryWriter


# Gradient descent for a linear-regression problem; losses are logged for TensorBoard.
writer = SummaryWriter('./runs/SGD')

# Preprocessing: standardize every column to zero mean and unit variance.
data = np.loadtxt(fname='./datasets/linear_data.csv', delimiter='\t')
data = (data - data.mean(axis=0)) / data.std(axis=0)

# Split features from targets — the last column holds the ground-truth label.
x, y = data[:, :-1], data[:, -1]
M, N = x.shape  # M samples, N features

# Parameter initialization: start from all-zero weights and zero bias.
w = np.zeros(shape=(N,))
b = 0

# Prediction: map a single input sample to a regression output.
def predict(x):
    """Return the linear model's output w·x + b for one sample x."""
    return np.dot(w, x) + b

print(f'predict x[0]: {predict(x[0])}, ground truth: {y[0]}')

# Squared-error loss for one (input, target) pair.
def get_loss(x, y):
    """Return the squared residual (predict(x) - y)² for a single sample."""
    residual = predict(x) - y
    return residual * residual


# Total loss over the whole training set.
# Summed rather than averaged: the mean would be small and hide the
# jitter that SGD causes, which is what we want to visualize.
def total_loss():
    """Return the sum of squared errors over all M training samples.

    Vectorized over the full dataset: equivalent to summing
    get_loss(x[i], y[i]) for every i, but computed in C instead of a
    Python-level loop (this runs every epoch, so the loop was a hot spot).
    """
    residuals = x.dot(w) + b - y  # (M,) per-sample prediction errors
    return np.sum(residuals ** 2)

print(total_loss())

# Numerical gradient of the single-sample loss w.r.t. the parameters.
def get_gradient(x, y):
    """Estimate d(loss)/dw and d(loss)/db at sample (x, y) by finite differences.

    Uses the central difference (f(p+eps) - f(p-eps)) / (2*eps), whose
    truncation error is O(eps²) — the original forward difference was only
    O(eps) accurate. The global parameters w and b are perturbed in place
    and restored *exactly* from a saved copy: the original
    `w[i] += eps; w[i] -= eps` round-trip can leave w[i] slightly changed
    due to floating-point rounding.

    Returns:
        (gradient_w, gradient_b): an ndarray of shape (N,) and a scalar.
    """
    global w
    global b
    eps = 1e-3
    gradient_w = np.empty(N)
    for i in range(N):
        original = w[i]
        w[i] = original + eps
        loss_plus = get_loss(x, y)
        w[i] = original - eps
        loss_minus = get_loss(x, y)
        w[i] = original  # exact restore, no rounding drift
        gradient_w[i] = (loss_plus - loss_minus) / (2 * eps)

    original_b = b
    b = original_b + eps
    loss_plus = get_loss(x, y)
    b = original_b - eps
    loss_minus = get_loss(x, y)
    b = original_b
    gradient_b = (loss_plus - loss_minus) / (2 * eps)
    return gradient_w, gradient_b



# Training hyper-parameters.
hyper_dict = {
    'epochs': 2500,
    'lr': 1e-3,
}
# Epoch indices (x axis) and per-epoch total losses (y axis) for the plot.
x_v = list(range(hyper_dict['epochs']))
y_v = []


# SGD training loop: one randomly chosen sample drives each update step.
for epoch in range(hyper_dict['epochs']):
    i = np.random.randint(M)
    # Gradient from a single sample — this is what makes it *stochastic*.
    gradient_w, gradient_b = get_gradient(x[i], y[i])
    # Gradient-descent parameter update.
    w -= hyper_dict['lr'] * gradient_w
    b -= hyper_dict['lr'] * gradient_b
    # Evaluate the full-dataset loss ONCE per epoch and reuse it: the
    # original called total_loss() (an O(M) pass) up to three times here.
    loss = total_loss()
    y_v.append(loss)
    writer.add_scalar('total loss', loss, epoch)
    if epoch % 100 == 0:
        print(f'total loss at {epoch}epoch: {loss}')

# Plot the loss curve over epochs.
plt.plot(x_v, y_v)
plt.xlabel('epochs')
plt.ylabel('total loss')
plt.show()