import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np
torch.manual_seed(2)


# Toy regression set: five rows where each of the 3 features equals the target.
X = torch.Tensor([[v] * 3 for v in range(1, 6)])
Y = torch.Tensor([v for v in range(1, 6)]).reshape(-1, 1)


class Simple(nn.Module):
  """A small feed-forward regressor: input_dim -> 16 -> 2 -> output_dim.

  Both hidden layers share one activation; the final layer is a plain
  linear readout. Every Linear weight is re-drawn from N(0, 1) at
  construction time.
  """

  def __init__(self, activation=F.relu, input_dim = 3, output_dim=3 - 2):
    super(Simple, self).__init__()
    # Originally lin1 was nn.Linear(input_dim, 2); it was widened to 16
    # units so the first layer does not bottleneck training.
    self.lin1 = nn.Linear(input_dim, 16)
    self.hidden = nn.Linear(16, 2)
    self.lin2 = nn.Linear(2, output_dim)
    self.activation = activation
    self.weights_init()

  def forward(self, x):
    # Two activated hidden layers, then the linear output layer.
    h = self.activation(self.lin1(x))
    h = self.activation(self.hidden(h))
    return self.lin2(h)

  def weights_init(self):
    """Re-draw every Linear layer's weights from a standard normal."""
    linear_layers = (m for m in self.modules() if isinstance(m, nn.Linear))
    for layer in linear_layers:
      layer.weight.data.normal_(0, 1)

# Instantiate the network defined above.
model = Simple()



# Mean-squared-error loss for the scalar regression target,
# minimized with Adam at a fixed learning rate of 0.02.
loss_func = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.02)

# Train one sample at a time over all 5 rows, for `epochs` passes.
epochs = 200
steps = X.size(0)
for i in range(epochs):
    for j in range(steps):
        # data_point = np.random.randint(X.size(0))
        # Variable is deprecated since torch 0.4 — plain tensors carry
        # autograd state, so index the data directly.
        x_var = X[j]
        y_var = Y[j]

        optimizer.zero_grad()
        y_hat = model(x_var)
        # Call the module itself (not .forward()) so __call__ hooks run.
        loss = loss_func(y_hat, y_var)
        loss.backward()
        optimizer.step()

    # Was `i % 500 == 0`: with epochs = 200 only epoch 0 ever printed.
    # Log every 50 epochs instead.
    if i % 50 == 0:
        # .item() is the supported way to read a scalar loss
        # (replaces the deprecated loss.data.numpy()).
        error = loss.item()
        print(f"Epoch: {i}, Loss: {error}, ")


# Before the fix this was `model(torch.tensor(X[0]))`, which emitted a
# warning (wrapping an existing tensor in torch.tensor copies it); pass
# the tensor straight to the model instead.
model(X[0])
print(X[0])