import numpy as np
from matplotlib import pyplot as plt
from tensorboardX import SummaryWriter

# TensorBoard writer for logging the training curve.
writer = SummaryWriter('./runs/GD')

# Gradient descent for a linear-regression problem.
# Load the dataset and standardize every column to zero mean / unit variance.
data = np.loadtxt(fname='./datasets/linear_data.csv', delimiter='\t')
data = (data - data.mean(axis=0)) / data.std(axis=0)

# Split into features (all columns but the last) and targets (last column).
x, y = data[:, :-1], data[:, -1]
M, N = x.shape  # M samples, N features

# Zero-initialize the model parameters.
w = np.zeros(N)
b = 0

# 预测，一个输入，预测回归的结果
def predict(x):
    """Linear model: return the scalar prediction w·x + b for one feature vector."""
    return np.dot(w, x) + b

print(f'predict x[0]: {predict(x[0])}, ground truth: {y[0]}')


# 定义MSELoss函数
# MSE loss over the whole dataset.
def get_loss():
    """Return the mean-squared error of the current (w, b) over all M samples.

    Vectorized: computes every residual x·w + b - y in a single NumPy
    expression instead of an O(M) Python loop of per-sample predict() calls.
    """
    residuals = x.dot(w) + b - y
    return (residuals ** 2).mean()


# 定义求梯度函数
# Numerical gradient of the loss w.r.t. w and b (forward finite differences).
def get_gradient():
    """Estimate d(loss)/dw and d(loss)/db by forward finite differences.

    Each parameter is perturbed by eps, the loss change is measured, and the
    parameter is restored before moving on.

    Returns:
        (gradient_w, gradient_b): ndarray of shape (N,) and a scalar.
    """
    global w
    global b
    eps = 1e-3
    loss_before = get_loss()

    gradient_w = np.empty(N)
    for i in range(N):
        # Perturb one weight, measure the loss, then undo the perturbation.
        w[i] += eps
        loss_after = get_loss()
        w[i] -= eps
        gradient_w[i] = (loss_after - loss_before) / eps
    b += eps
    loss_after = get_loss()
    # BUG FIX: restore b. The original never undid the perturbation, so every
    # gradient call permanently shifted b upward by eps, biasing training.
    b -= eps
    gradient_b = (loss_after - loss_before) / eps
    return gradient_w, gradient_b

print(f'gradient example:{get_gradient()}')

# Training hyper-parameters.
hyper_dict = {
    'epochs': 2500,
    'lr': 0.005
}

# Epoch indices (plot x-axis) and per-epoch losses (plot y-axis).
x_v = list(range(hyper_dict['epochs']))
y_v = []

# Gradient-descent training loop: step both parameters, then log the loss.
for i in range(hyper_dict['epochs']):
    gradient_w, gradient_b = get_gradient()
    w -= gradient_w * hyper_dict['lr']
    b -= gradient_b * hyper_dict['lr']

    # Compute the (O(M)) loss once per epoch; the original recomputed it
    # up to three times per iteration (plot, TensorBoard, and print).
    loss = get_loss()
    y_v.append(loss)
    writer.add_scalar('total loss', loss * M, i)
    if i % 50 == 0:
        print(f'loss = {loss}')

# Plot the training curve (loss vs. epoch).
plt.plot(x_v, y_v)
plt.xlabel('epochs')
plt.ylabel('loss')
plt.show()
# BUG FIX: the original message said "predict x[0]" while actually predicting
# x[10]; the label now matches the sample being evaluated.
print(f'predict x[10]: {predict(x[10])}, ground truth: {y[10]}')