import random
import torch
from d2l import torch as d2l
from data_generate import generate_data

# Synthetic training data from the project-local generator.
# NOTE(review): presumably features is (n, 2) and labels is (n,) to match
# the (2, 1) weight vector below — TODO confirm against data_generate.
features,labels = generate_data()
# Nominal minibatch size consumed by data_iter() inside train().
batch_size = 10
def data_iter(batch_size, features, labels):
    """Yield shuffled (features, labels) minibatches for one epoch.

    Every example appears exactly once per epoch; the final batch may be
    smaller than `batch_size` when the dataset size is not a multiple of it.
    """
    num_examples = features.shape[0]
    order = list(range(num_examples))
    # In-place shuffle gives a fresh random visit order each epoch.
    random.shuffle(order)
    for start in range(0, num_examples, batch_size):
        # Python slicing already clamps at the end of the list, so no
        # explicit min(start + batch_size, num_examples) is needed.
        picked = torch.tensor(order[start:start + batch_size])
        yield features[picked], labels[picked]

# Trainable model parameters, updated in place by gradient_descent():
# w: (2, 1) weights drawn from N(0, 0.01^2); b: scalar bias starting at 0.
# requires_grad=True makes autograd accumulate gradients into .grad.
w = torch.normal(0,0.01,size=(2,1),requires_grad=True)
b = torch.zeros(1,requires_grad=True)
def linearReg(X, w, b):
    """Linear regression model: the affine map X @ w + b.

    X is (n, d), w is (d, 1), b broadcasts over the (n, 1) product.
    """
    return X @ w + b

def squared_loss(y_hat, y, batch_size):
    """Halved mean squared error: sum((y_hat - y)^2) / (2 * batch_size).

    y is reshaped to y_hat's shape so an (n,) target against an (n, 1)
    prediction does not silently broadcast into an (n, n) residual matrix.
    Summing over dim 0 keeps the original shape-(1,) result (matching the
    builtin-sum-over-rows behavior), which backward() reduces fine.
    """
    residual = y_hat - y.reshape(y_hat.shape)
    return (residual ** 2).sum(dim=0) / (2 * batch_size)

def gradient_descent(params, alpha):
    """One SGD step: p <- p - alpha * p.grad for each p, then reset grads.

    Runs under no_grad so the parameter update itself is not recorded by
    autograd; gradients are zeroed afterwards because .grad accumulates
    across backward() calls.
    """
    with torch.no_grad():
        for p in params:
            p.sub_(alpha * p.grad)  # in-place, equivalent to p -= alpha * p.grad
            p.grad.zero_()

epochs = 3    # full passes over the training set
alpha = 0.03  # SGD learning rate
# Aliases so train() reads generically against any model/loss pair.
loss_func = squared_loss
net = linearReg

def train(features, labels, epochs, alpha):
    """Train the global (w, b) by minibatch SGD and print per-epoch loss.

    Uses the module-level `net`, `loss_func`, `batch_size`, `w`, `b`.
    Mutates w and b in place via gradient_descent().
    """
    for epoch in range(epochs):
        for X, y in data_iter(batch_size, features, labels):
            y_hat = net(X, w, b)
            # Scale by the ACTUAL minibatch size, not the nominal batch_size:
            # when the dataset size is not a multiple of batch_size, the last
            # batch is smaller and dividing by batch_size would understate its
            # loss and gradient.
            loss = loss_func(y_hat, y, len(y))
            loss.backward()
            gradient_descent([w, b], alpha)
        # Report full-dataset loss without building an autograd graph.
        with torch.no_grad():
            train_l = loss_func(net(features, w, b), labels, labels.shape[0])
            print('epoch:',epoch,'loss:',train_l)

# Entry point: fit the linear model on the generated dataset.
train(features,labels,epochs,alpha)

