import torch
import torch.nn as nn
from torch.optim import SGD

# Select the compute device: prefer CUDA when available, otherwise fall
# back to the CPU so the script also runs on machines without a GPU.
# (Previously this was hard-coded to "cuda", which made x.to(device)
# crash on CPU-only hosts.)
print(torch.cuda.is_available())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

# 0. Prepare the data: 500 random samples in [0, 1) and ground-truth
# targets from the linear relation y = 3x + 0.8 (the parameters the
# model is expected to learn).
x = torch.rand([500, 1]).to(device)
y_true = 3 * x + 0.8

# 1. Define the model


class MyLinear(nn.Module):
    """Two stacked 1->1 affine layers: out = linear1(linear(x))."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 1)
        self.linear1 = nn.Linear(1, 1)

    def forward(self, x):
        # Feed the input through both layers in sequence.
        return self.linear1(self.linear(x))


# 2. Instantiate the model on the chosen device, the SGD optimizer,
# and the mean-squared-error loss.
my_linear = MyLinear().to(device)
optimizer = SGD(my_linear.parameters(), lr=0.001)
loss_fn = nn.MSELoss()

# 3. Training loop: gradient descent with parameter updates.
# NOTE(fix): the inner parameter loop previously reused `i`, shadowing
# the epoch counter; it is renamed to `p`. The loss is now printed as
# the comments intended ("print the loss and weights") — previously
# only the parameters were shown.
for epoch in range(50000):

    # Forward pass: current model predictions for all 500 samples.
    y_predict = my_linear(x)

    loss = loss_fn(y_predict, y_true)

    # Clear gradients accumulated from the previous step.
    optimizer.zero_grad()
    loss.backward()
    # Apply the gradient update.
    optimizer.step()

    if epoch % 50 == 0:
        # Print the current loss followed by every parameter. Each layer
        # is 1->1, so every weight/bias tensor holds exactly one element
        # and .item() is valid.
        params = list(my_linear.parameters())
        print(loss.item(), end=" ")
        for p in params:
            print(p.item(), end=" ")
        print("")
# Training finished — print a visual separator.
separator = "* " * 50
print(separator)

# Optional CPU evaluation, left disabled:
# my_linear.to("cpu")
# print(my_linear(torch.tensor([5.])))