import torch
import torch.nn as nn
import torch.optim as optim

import numpy as np


# Build the neural-network model
class Model(nn.Module):
    """Minimal linear-regression model: a single ``nn.Linear`` layer.

    Args:
        input_dim: number of input features per sample.
        output_dim: number of output features per sample.
    """

    def __init__(self, input_dim, output_dim):
        super().__init__()
        # The layer stays wrapped in nn.Sequential so state-dict keys
        # ("layer.0.weight", "layer.0.bias") match checkpoints produced
        # by the original implementation.
        self.layer = nn.Sequential(
            nn.Linear(in_features=input_dim, out_features=output_dim),
        )

    def forward(self, x):
        """Apply the linear layer to ``x`` and return the result."""
        return self.layer(x)


if __name__ == '__main__':
    # Build the training data: pairs (i, 2*i + 1) for i in 0..99,
    # i.e. samples from the line y = 2x + 1 that the model should recover.
    train_set = [[i, 2 * i + 1] for i in range(100)]

    x_train = np.array([[s[0]] for s in train_set], dtype=np.float32)
    y_train = np.array([[s[1]] for s in train_set], dtype=np.float32)

    # Training hyperparameters (fixed typo: was "epoches").
    epochs = 5000
    learning_rate = 0.01

    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    model = Model(1, 1).to(device)

    # Mean-squared-error loss for regression.
    criterion = nn.MSELoss()
    # Adam optimizer over all model parameters.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # The dataset is fixed, so convert it to tensors once, outside the
    # loop, instead of rebuilding the same tensors on every epoch.
    inputs = torch.from_numpy(x_train).to(device)
    targets = torch.from_numpy(y_train).to(device)

    for epoch in range(epochs):
        # Zero the gradients; otherwise they accumulate across iterations.
        optimizer.zero_grad()

        # Forward pass: compute outputs from inputs via the model.
        outputs = model(inputs)

        # Compute the loss and backpropagate.
        loss = criterion(outputs, targets)
        loss.backward()

        # Let the optimizer update the parameters.
        optimizer.step()

        # Report the current loss every 50 epochs.
        if epoch % 50 == 0:
            print('epoch:{}, loss: {}'.format(epoch, loss.item()))

    # Save the model; here we save only its state dict.
    torch.save(model.state_dict(), 'linear.pt')

    # Instantiate a fresh model and load the trained parameters.
    # map_location='cpu' lets the checkpoint load even when it was saved
    # from a CUDA device and CUDA is unavailable at load time.
    pre_model = Model(1, 1)
    pre_model.load_state_dict(torch.load('linear.pt', map_location='cpu'))
    pre_model.eval()

    # Predict new points. torch.no_grad() skips graph construction; the
    # old code needlessly called .requires_grad_() and read the
    # deprecated .data attribute.
    with torch.no_grad():
        test_x = torch.from_numpy(np.array([[200], [201]], dtype=np.float32))
        predicted = pre_model(test_x).numpy()
    print(predicted)
