import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim as optim
class XorNet(nn.Module):
  """Two-layer MLP (2 -> 10 -> 1) for learning the XOR function."""

  def __init__(self):
    super().__init__()
    # Layer attribute names fc1/fc2 are kept so any saved state dicts
    # remain loadable.
    self.fc1 = nn.Linear(2, 10)
    self.fc2 = nn.Linear(10, 1)

  def forward(self, x):
    """Map a (..., 2) input tensor to a (..., 1) output tensor."""
    hidden = F.relu(self.fc1(x))
    return self.fc2(hidden)

# Model, loss, and optimizer setup.
model = XorNet()
loss_fn = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
epochs = 500


# XOR truth table: four input pairs and their expected outputs.
X = np.array([[0., 0.], [1., 1.], [0., 1.], [1., 0.]])
Y = np.array([0., 0., 1., 1.])

# Build float32 tensors once (nn.Linear parameters are float32, so this
# avoids a dtype conversion on every training step). The target is
# reshaped to (N, 1) to match the network's output shape for MSELoss.
y_train_t = torch.tensor(Y, dtype=torch.float32).reshape(-1, 1)
x_train_t = torch.tensor(X, dtype=torch.float32)

# Per-step training loss values.
history = []

# Training loop.
# NOTE(review): each inner "batch" pass uses the FULL 4-sample dataset, so
# every epoch performs 4 identical full-batch updates. The inner loop is
# kept to preserve the original number of optimizer steps.
# The tensor conversions are hoisted out of the loops (they are invariant);
# the deprecated Variable wrapper is removed (tensors carry autograd state
# directly since torch 0.4).
x_batch = x_train_t.float()
y_batch = y_train_t.float()
for i in range(epochs):
  for batch_ind in range(4):
    # forward pass
    y_pred = model(x_batch)
    # compute the loss
    loss = loss_fn(y_pred, y_batch)
    # record the scalar loss (history was previously never populated)
    history.append(loss.item())
    # reset gradients accumulated from the previous step
    optimizer.zero_grad()
    # backward pass
    loss.backward()
    # step the optimizer - update the weights
    optimizer.step()
  # report once per epoch instead of once per step (2000 lines -> 500)
  print(i, loss.item())


# Inference: evaluate the trained network on two sample inputs.
# torch.no_grad() stops autograd from tracking these forward passes; the
# deprecated Variable wrapper is removed. Results are printed instead of
# being computed and discarded.
with torch.no_grad():
  v = torch.tensor([1., 0.])
  print("xor(1, 0) ->", model(v).item())

  v = torch.tensor([1., 1.])
  print("xor(1, 1) ->", model(v).item())