import torch
from torch import nn

"""Linear regression."""
# 1. Training set: targets follow y = 2x.
x_data = torch.tensor([[1.0], [2.0], [3.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0]])

# 2、设计模型
class LinearModel(torch.nn.Module):
    """Single-feature linear regression model: y_pred = w * x + b.

    BUG FIX: the original appended ``nn.ReLU(inplace=True)`` after the
    linear layer.  That clamps every negative prediction to zero, and if
    SGD ever drives the weight negative the gradient dies (dead ReLU),
    so the "linear regression" can silently fail to converge.  A linear
    model must have no activation, so the ReLU is removed.  The
    ``nn.Sequential`` wrapper is kept so existing callers can still
    reach the layer as ``model.linear[0]``.
    """

    def __init__(self):
        super(LinearModel, self).__init__()
        # torch.nn.Linear(in_features, out_features, bias=True):
        #   in_features  -- dimensionality of each input sample (1 here)
        #   out_features -- dimensionality of each output sample (1 here)
        #   bias         -- whether to learn an additive bias (default True)
        self.linear = nn.Sequential(
            torch.nn.Linear(1, 1, bias=True),
        )

    def forward(self, x):
        """Return predictions for a batch ``x`` of shape (N, 1)."""
        y_pre = self.linear(x)
        return y_pre


model = LinearModel()
# 3. Loss function and optimizer
criterion = nn.MSELoss(reduction='sum')  # summed squared error over the batch
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # plain SGD over all model parameters

if __name__ == "__main__":
    # 4. Training loop: forward pass, backward pass, parameter update.
    for epoch in range(100):
        prediction = model(x_data)
        loss = criterion(prediction, y_data)
        print(f"第{epoch}次迭代，loss={loss.item()}")

        optimizer.zero_grad()  # clear gradients accumulated from the previous step
        loss.backward()        # backpropagate the loss
        optimizer.step()       # apply the parameter update

    # Report the learned parameters and predict one unseen input.
    print("w= ", model.linear[0].weight.item())
    print("b= ", model.linear[0].bias.item())
    x_test = torch.Tensor([[4.0]])
    print("y_pred = ", model(x_test).data)
