import numpy as np


def synthetic_data(w, b, n):
    """Generate a noisy linear-regression dataset.

    Args:
        w: true weight row vector (2-D; only w.shape[1] = feature count is used).
        b: true bias.
        n: number of samples to draw.

    Returns:
        (x, y): features of shape (n, d) and targets y = w @ x.T + b plus
        Gaussian noise with std 0.01.
    """
    num_features = w.shape[1]
    # Standard-normal features; drawn first so the RNG stream matches callers
    # that seed before this call.
    x = np.random.normal(0, 1, (n, num_features))
    clean = np.matmul(w, x.T) + b
    noise = np.random.normal(0, 0.01, clean.shape)
    return x, clean + noise


def data_iter(batch_size, features, labels):
    """Shuffle the dataset once, then yield it in minibatches.

    Args:
        batch_size: examples per batch; the final batch may be smaller.
        features: array indexed by row (one example per row).
        labels: array/matrix indexed by column (one label per column).

    Yields:
        (x_batch, y_batch) pairs selected by a 2-D (np.mat) index, which is
        what the shapes expected downstream rely on.
    """
    total = len(features)
    order = list(range(total))
    np.random.shuffle(order)  # one epoch-level shuffle
    for start in range(0, total, batch_size):
        # Slicing clamps at the end automatically; np.mat keeps the index 2-D.
        picked = np.mat(order[start:start + batch_size])
        yield features[picked, :], labels[:, picked]


def linreg(x, w, b):
    """Affine model: predictions as a row vector, w @ x.T + b.

    Args:
        x: features, shape (k, d).
        w: weight row vector, shape (1, d).
        b: bias (broadcast over the k predictions).
    """
    scores = w @ x.T  # (1, d) x (d, k) -> (1, k)
    return scores + b


def squared_loss(y_hat, y):
    """Elementwise squared error, halved: (y_hat - y)^2 / 2.

    Uses np.square (not **) so the operation stays elementwise even when the
    residual is an np.matrix, where ** would mean matrix power.
    """
    residual = y_hat - y
    return 0.5 * np.square(residual)


def linreg_sgd(w, b, x, y, lr, batch_size):
    """Perform one minibatch SGD step for linear regression with squared loss.

    Args:
        w: weight row vector, shape (1, d).
        b: bias, shape (1, 1).
        x: minibatch features as yielded by data_iter (leading axis of size 1,
           squeezed away below).
        y: minibatch labels as yielded by data_iter.
        lr: learning rate.
        batch_size: nominal batch size used to average the gradient.

    Returns:
        (new_w, new_b): updated parameters (inputs are not mutated).
    """
    # manual calculate the gradient of squared loss (no autograd)
    y_hat = linreg(x.squeeze(axis=0), w, b)  # predictions, row vector
    # NOTE(review): when y is an np.matrix (as produced via np.mat in this
    # file), `*` below is a MATRIX product (1, k) x (k, d) -> (1, d), which
    # already sums the per-example gradients over the batch. With plain
    # ndarrays this line would broadcast elementwise instead — confirm callers
    # always pass matrix labels.
    grad_w = (y_hat - y.squeeze(axis=0))*x.squeeze(axis=0)
    grad_b = (y_hat - y.squeeze(axis=0))
    grad_b = grad_b.sum(axis=1)  # sum residuals over the batch
    # Divide by the full batch_size even for a short final batch, which
    # slightly shrinks that last step.
    new_w = w-lr*grad_w/batch_size
    new_b = b-lr*grad_b/batch_size
    return new_w, new_b


def main():
    """Train linear regression from scratch with minibatch SGD and report
    the learned parameters against the known ground truth."""
    # generate datasets
    # NOTE: true_w is created with np.mat on purpose — the gradient step in
    # linreg_sgd relies on np.matrix `*` semantics for the labels derived
    # from it; do not switch this to np.array without updating linreg_sgd.
    true_w = np.mat([2, -3.4])
    true_b = np.array([4.2])

    features, labels = synthetic_data(true_w, true_b, 1000)

    # hyperparameters
    batch_size = 10
    lr = 0.03
    epochs = 3

    # small random init for weights, zero bias
    w = np.random.normal(0, 0.01, (1, 2))
    b = np.zeros((1, 1))
    print(f'Initial w={w}, b={b}')

    net = linreg
    loss = squared_loss

    for epoch in range(epochs):
        for x, y in data_iter(batch_size, features, labels):
            w, b = linreg_sgd(w, b, x, y, lr, batch_size)
        # full-dataset training loss after each epoch
        train_loss = loss(net(features, w, b), labels)
        print(f'In epoch {epoch + 1}, loss is {float(train_loss.mean()):f}')

    print(f'estimate w is {w}, b is {b}')
    print(f'estimate error is that w is {w-true_w} and b is {b-true_b}')


# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
