import torch
import torch.nn as nn
import torch.optim as optim

import numpy as np


class Model(nn.Module):
    """Single-layer linear regression model: y = W x + b."""

    def __init__(self, input_dim, output_dim):
        """Create one fully-connected layer mapping input_dim -> output_dim."""
        super().__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        """Apply the linear layer to x and return the result."""
        return self.linear(x)


if __name__ == '__main__':
    # Fit a 1-D linear regression to the exact relationship y = 2*x + 1.
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    # Training pairs (x, 2x + 1) for x in [0, 100).
    train_set = [[i, 2*i + 1] for i in range(100)]

    x_train = np.array([[s[0]] for s in train_set], dtype=np.float32)
    y_train = np.array([[s[1]] for s in train_set], dtype=np.float32)

    # training
    epochs = 8000
    learning_rate = 0.001

    net = Model(1, 1).to(device)

    criterion = nn.MSELoss()
    optimizer = optim.Adam(net.parameters(), lr=learning_rate)

    # The training data is constant, so build the tensors and move them to
    # the device ONCE, instead of converting/copying them on every epoch
    # (the original did this inside the loop — pure loop-invariant work,
    # plus a redundant host->device transfer per epoch on CUDA).
    inputs = torch.from_numpy(x_train).to(device)
    targets = torch.from_numpy(y_train).to(device)

    for epoch in range(epochs):
        optimizer.zero_grad()
        outputs = net(inputs)

        loss = criterion(outputs, targets)
        loss.backward()

        optimizer.step()

        if epoch % 50 == 0:
            print('epoch:{}, loss: {}'.format(epoch, loss.item()))

    # torch.save(net.state_dict(), 'test.pt')

