import torch
import torch.nn as nn
import torch.optim as optim
from matplotlib import pyplot as plt
from torch.autograd import Variable

class LinearRegression(nn.Module):
    """Single-feature linear model: y = w * x + b."""

    def __init__(self):
        super().__init__()
        # One input feature mapped to one output value.
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        """Apply the affine transform to input tensor ``x`` of shape (N, 1)."""
        return self.linear(x)
    
model = LinearRegression()
#print(model)

# Training configuration: loss function, learning rate, optimizer,
# and number of iterations.
num_epochs = 1000
learning_rate = 1e-2  # fixed typo: was "learing_rate"
loss_fn = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)

# Build a dataset from the line y = 2x + 0.2 plus uniform noise in [0, 1).
# NOTE(review): torch.rand is not zero-mean, so the fitted intercept will sit
# near 0.2 + 0.5 rather than 0.2 — presumably acceptable for this demo.
# torch.autograd.Variable is deprecated since PyTorch 0.4; plain tensors
# carry autograd information, so the wrappers are dropped here.
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = x * 2 + 0.2 + torch.rand(x.size())
#plt.scatter(x.data.numpy(), y.data.numpy())
#plt.show()

# Standard training loop: forward pass, compute loss, backprop, update.
for epoch in range(num_epochs):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    optimizer.zero_grad()  # clear gradients accumulated from the previous step
    loss.backward()
    optimizer.step()
    if epoch % 200 == 0:
        # .item() extracts the Python float from the 0-dim loss tensor;
        # the formatted output is identical to formatting the tensor directly.
        print("[{}]/[{}] loss:{:.4f}".format(epoch + 1, num_epochs, loss.item()))

# Plot the raw data against the model's final predictions.
# .detach() replaces the deprecated .data attribute for leaving the autograd
# graph before converting to NumPy.
plt.scatter(x.detach().numpy(), y.detach().numpy())
plt.plot(x.detach().numpy(), y_pred.detach().numpy(), 'r-', lw=5)

plt.text(0.5, 0, 'Loss = %.4f' % loss.item(), fontdict={'size': 20, 'color': 'red'})
plt.show()

# Report the learned parameters; the weight should approach the generating
# slope of 2 (the bias absorbs the non-zero-mean noise as well).
[w, b] = model.parameters()
print(w, b)