import torch
import torch.nn as nn
import math
import numpy as np

# input = torch.randn(2, 5, requires_grad=True)*2000
# target = torch.randn(2, 5)*2000
# print('input is', input)
# print('target is', target)
#
#
# # loss = nn.CrossEntropyLoss()
# loss = nn.MSELoss()
# output = loss(input, target)
# print('output is', output)


import math
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn


def generate_data(start=0.0, stop=5.0, step=0.1, noise=0.1, show=True):
    """Sample noisy points from cos(x) and optionally scatter-plot them.

    Args:
        start, stop, step: x sampling range, as in ``np.arange`` (defaults
            reproduce the original hard-coded 0..5 with step 0.1).
        noise: half-width of the uniform noise added to ``cos(x)``.
        show: when True (default, matching original behavior), display a
            scatter plot of the generated data.

    Returns:
        Tuple ``(x, y)`` of 1-D float arrays of equal length.
    """
    x = np.arange(start, stop, step)
    # Vectorized cos + uniform noise replaces the per-element Python loop;
    # draws the same number of uniform samples in the same RNG order.
    y = np.cos(x) + np.random.uniform(-noise, noise, size=x.shape)
    if show:
        plt.scatter(x, y)
        plt.show()
    return x, y


class Net(nn.Module):
    """Tiny MLP regressor: Linear(1, 10) -> ReLU -> Linear(10, 1)."""

    def __init__(self):
        super(Net, self).__init__()
        # Same layers in the same order, so seeded weight initialization
        # matches the original construction exactly.
        layers = [
            nn.Linear(1, 10),
            nn.ReLU(),
            nn.Linear(10, 1),
        ]
        self.fc = nn.Sequential(*layers)

    def forward(self, x):
        # Delegate straight to the sequential stack.
        return self.fc(x)


def train(net, x, y, epochs=500, lr=0.01):
    """Fit *net* to (x, y) with full-batch Adam + MSE, then plot the fit.

    Args:
        net: module mapping ``(N, 1, 1)`` inputs to ``(N, 1, 1)`` outputs.
        x, y: 1-D numpy arrays of equal length (inputs and targets).
        epochs: number of full-batch gradient steps (default 500, as before).
        lr: Adam learning rate (default 0.01, as before).
    """
    optimizer = torch.optim.Adam(net.parameters(), lr=lr, betas=(0.90, 0.99),
                                 eps=1e-8, weight_decay=0., amsgrad=False)
    loss_func = nn.MSELoss()
    x = torch.from_numpy(x).float()
    y = torch.from_numpy(y).float()
    for e in range(epochs):
        outputs = net(x.view(-1, 1, 1))
        # Flatten both sides so MSE compares element-wise; y is already
        # float, so the original's redundant .float() is dropped.
        loss = loss_func(outputs.view(-1), y.view(-1))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # if (e) % 10 == 0:
        #     print('epoch=', e, ',loss=', loss)
        #     test(net, x, y, e)
    # Guard: the original reused the loop variable here, which is
    # undefined when epochs == 0.
    if epochs > 0:
        test(net, x, y, epochs - 1)

def test(net, x, y, e):
    """Plot the data (green scatter) against the model prediction (red line).

    Args:
        net: trained model taking ``(N, 1, 1)`` inputs.
        x, y: input/target tensors; x also serves as the plot's horizontal axis.
        e: epoch index (kept for the commented-out per-epoch savefig naming).
    """
    # No gradients are needed for visualization; skipping autograd graph
    # construction makes the forward pass cheaper. Output values are identical.
    with torch.no_grad():
        predict = net(x.view(-1, 1, 1))
    y_pre = predict.view(-1).numpy()
    plt.scatter(x, y, color='g')
    plt.plot(x, y_pre, color='r')
    # plt.savefig('./figure/' + str(e) + '.png')
    plt.pause(0.05)
    plt.show()

if __name__ == '__main__':
    # Script entry point: sample noisy cos(x) data, build the MLP,
    # then train it and plot the resulting fit.
    data_x, data_y = generate_data()
    model = Net()
    train(model, data_x, data_y)