
### Defining the Network Class

import torch
from torch import nn

n_input = X.shape[1]  # Must match the number of input features
n_hidden1 = 8         # Number of neurons in the 1st hidden layer
n_hidden2 = 4         # Number of neurons in the 2nd hidden layer
n_output = 1          # Number of output units (e.g. 1 for binary classification)

class Network(nn.Module):
  def __init__(self):
    super(Network, self).__init__()
    # Inputs to the 1st hidden layer linear transformation
    self.hidden1 = nn.Linear(n_input, n_hidden1)
    # 1st to 2nd hidden layer linear transformation
    self.hidden2 = nn.Linear(n_hidden1, n_hidden2)
    # Activation function for the hidden layers' outputs - ReLU
    self.relu = nn.ReLU()
    # Output layer linear transformation
    self.output = nn.Linear(n_hidden2, n_output)
    # Activation function for the output layer - sigmoid
    self.sigmoid = nn.Sigmoid()

  def forward(self, X):
    # Pass the input tensor through each of the operations defined above
    X = self.hidden1(X)
    X = self.relu(X)
    X = self.hidden2(X)
    X = self.relu(X)
    X = self.output(X)
    X = self.sigmoid(X)
    return X

model = Network()
print(model)
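The same architecture can also be written more compactly with nn.Sequential; a minimal equivalent sketch using the layer sizes defined above (the name seq_model is just for illustration):

seq_model = nn.Sequential(
  nn.Linear(n_input, n_hidden1),
  nn.ReLU(),
  nn.Linear(n_hidden1, n_hidden2),
  nn.ReLU(),
  nn.Linear(n_hidden2, n_output),
  nn.Sigmoid(),
)
print(seq_model)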


### Loss Function, Optimizer, and Training
criterion = nn.BCELoss() # Binary cross-entropy loss

output = model(X) # Forward pass; the sigmoid output is a probability, not a raw logit

loss = criterion(output, y)
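A more numerically stable variant (not used here) is to drop the final sigmoid and use nn.BCEWithLogitsLoss, which applies the sigmoid internally; a sketch that chains the model's submodules by hand to get the pre-sigmoid scores:

# Sketch: compute pre-sigmoid scores manually, then let
# BCEWithLogitsLoss apply the sigmoid internally for stability.
criterion_logits = nn.BCEWithLogitsLoss()
raw = model.output(model.relu(model.hidden2(model.relu(model.hidden1(X)))))
loss_stable = criterion_logits(raw, y)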

from torch import optim
optimizer = optim.SGD(model.parameters(), lr=0.1)
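SGD is not the only choice; Adam is a common drop-in alternative that usually needs less learning-rate tuning (a hypothetical alternative shown for reference, not used in the loops below):

adam_optimizer = optim.Adam(model.parameters(), lr=1e-3)  # Hypothetical alternative, not used below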


# Reset the gradients i.e. do not accumulate them across passes
optimizer.zero_grad()
# Forward pass
output = model(X)
# Calculate loss against the targets
loss = criterion(output, y)
# Backward pass (autograd)
loss.backward()
# One step of the optimizer
optimizer.step()
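To verify that the backward pass actually populated gradients, each parameter's .grad can be inspected; a quick sanity-check sketch:

# Sanity check (sketch): every parameter should have a non-None .grad
# after loss.backward() has been called.
for name, p in model.named_parameters():
  print(name, None if p.grad is None else p.grad.norm().item())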


### Training for Multiple Epochs
epochs = 10
for e in range(epochs):
  optimizer.zero_grad()        # Reset the gradients
  output = model(X)            # Forward pass
  loss = criterion(output, y)  # Calculate loss
  print(f"Epoch - {e+1}, Loss - {round(loss.item(), 3)}")  # Print the loss
  loss.backward()              # Backpropagation
  optimizer.step()             # One optimizer step
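After training, a quick way to gauge the fit is to threshold the probabilities at 0.5 and compare against the labels; a sketch assuming y holds 0/1 targets with the same shape as the model output:

with torch.no_grad():
  preds = (model(X) > 0.5).float()               # Hard 0/1 predictions
  accuracy = (preds == y).float().mean().item()  # Fraction of correct predictions
print(f"Training accuracy: {accuracy:.3f}")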

### Watching the Output Probabilities Change Over Time
import matplotlib.pyplot as plt

running_loss = []  # Collect the loss at every epoch
epochs = 100       # The snapshots below fire every 20 epochs, so train longer than before

for e in range(epochs):
  optimizer.zero_grad()
  output = model(X)
  loss = criterion(output, y)
  #print(f"Epoch - {e+1}, Loss - {round(loss.item(), 3)}")
  loss.backward()
  optimizer.step()
  running_loss.append(loss.item())
  if (e+1) % 20 == 0:
    probs = model(X).detach().numpy().flatten()
    plt.figure(figsize=(15, 3))
    plt.title("Output probabilities after {} epochs".format(e+1))
    plt.bar(range(len(probs)), height=probs)
    plt.show()
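The running_loss list collected above can be plotted directly to confirm that the loss is decreasing; a short sketch:

plt.figure(figsize=(8, 3))
plt.plot(running_loss)  # One loss value per epoch
plt.xlabel("Epoch")
plt.ylabel("BCE loss")
plt.title("Training loss over epochs")
plt.show()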

### A Custom Loss Function
def mean_quartic_error(output, target):
  """
  Computes the mean 4th-power (quartic) error between output and target.
  """
  loss = torch.mean((output - target)**4)
  return loss
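Because the function is built entirely from differentiable torch operations, autograd handles it like any built-in loss; a tiny sanity check on made-up tensors:

# Tiny sanity check (made-up values): gradients flow through the custom loss.
a = torch.tensor([1.0, 2.0], requires_grad=True)
b = torch.tensor([1.5, 2.0])
l = mean_quartic_error(a, b)
l.backward()
print(l.item(), a.grad)  # 0.03125, tensor([-0.2500, 0.0000])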

optimizer = optim.SGD(reg_model.parameters(), lr=0.1)  # The optimizer must track reg_model's parameters

running_loss = []
for e in range(epochs):
  optimizer.zero_grad()
  output = reg_model(X)
  loss = mean_quartic_error(output, y)
  #print(f"Epoch - {e+1}, Loss - {round(loss.item(), 3)}")
  loss.backward()
  optimizer.step()
  running_loss.append(loss.item())
