# import torch
#
# x=torch.arange(4,dtype=torch.float)
# x.requires_grad_(True)
# y = 2*torch.dot(x,x)
# y.backward()
#
# # ';x.grad.zero_()
# y = x.sum()
# y.backward()
# print(x.grad)
import matplotlib
from jedi.api.refactoring import inline


# import random
# import torch
# from d2l import torch as d2l
#
# def synthetic_data(w,b,num_examples):
#     """Generate y = Xw + b + noise."""
#     x = torch.normal(0,1,(num_examples,len(w)))
#     y = torch.matmul(x,w) + b
#     y += torch.normal(0,0.01,y.shape)
#     return x , y.reshape(-1,1)
#
# def data_iter(batch_size,features,labels):
#     num_examples = len(features)
#     indices = list(range(num_examples))
#     random.shuffle(indices)
#     for i in range(0, num_examples,batch_size):
#         batch_indices=torch.tensor(indices[i:min(i+batch_size,num_examples)])
#         yield features[batch_indices],labels[batch_indices]
#
# def linreg(x , w, b):
#     return torch.matmul(x,w)+b
#
# def squared_loss(y_hat,y):
#     return (y_hat-y.reshape(y_hat.shape))**2 / 2
#
# def SGD(params,lr,batch_size):
#     with torch.no_grad():
#         for param in params:
#             param -= lr*param.grad / batch_size
#             param.grad.zero_()
#
#
#
# true_w= torch.tensor([2,-3.4])
# true_b = 4.2
# features , labels = synthetic_data(true_w,true_b,1000)
#
#
# w = torch.normal(0,0.01,size=(2,1),requires_grad=True)
# b = torch.zeros(1 , requires_grad=True)
#
# batch_size = 10
# lr=0.03
# num_epochs = 3
# net = linreg
# loss = squared_loss
#
# for epoch in  range(num_epochs):
#     for x,y in data_iter(batch_size,features,labels):
#         l = loss(net(x,w,b),y)
#         l.sum().backward()
#         SGD([w,b], lr, batch_size)
#     with torch.no_grad():
#         train_l = loss(net(features,w,b),labels)
#         print(f"epoch{epoch+1},loss{float(train_l.mean())}")

import numpy as np
import torch
from torch.utils import data
from d2l import torch as d2l
from torch import nn
def load_array(data_arrays,batch_size,is_train=True):
    """Wrap in-memory tensors in a PyTorch DataLoader.

    Args:
        data_arrays: tuple of tensors sharing the same first dimension.
        batch_size: number of samples per minibatch.
        is_train: when True, reshuffle the samples each epoch.

    Returns:
        A DataLoader yielding minibatches of the given tensors.
    """
    return data.DataLoader(data.TensorDataset(*data_arrays),
                           batch_size, shuffle=is_train)

# Synthetic linear-regression data: y = Xw + b + Gaussian noise.
true_w = torch.tensor([2,-3.4])
true_b = 4.2
features , labels = d2l.synthetic_data(true_w,true_b, 1000)

batch_size = 10
data_iter = load_array((features,labels),batch_size)

# Single-layer linear model: 2 inputs -> 1 output.
net = nn.Sequential(nn.Linear(2,1))
net[0].weight.data.normal_(0,0.01)  # small random init, matching the scratch version above
net[0].bias.data.fill_(0)
loss = nn.MSELoss()
trainer = torch.optim.SGD(net.parameters(),lr=0.03)

num_epochs = 3
for epoch in range(num_epochs):
    for x , y in data_iter:
        l = loss(net(x),y)
        trainer.zero_grad()  # clear gradients accumulated by the previous step
        l.backward()
        trainer.step()

    # Evaluate on the full dataset. no_grad() avoids building an autograd
    # graph for a forward pass whose gradients are never used (consistent
    # with the commented-out reference implementation above).
    with torch.no_grad():
        l = loss(net(features),labels)
    print(f"epoch{epoch+1},loss{l}")

