import torch
from torch.autograd import Variable
import numpy as np

torch.manual_seed(777)  # fixed seed for reproducibility

# XOR truth table: the four 2-bit inputs and their expected outputs.
x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)

# Run on the GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# torch.autograd.Variable has been deprecated since PyTorch 0.4:
# plain tensors carry autograd state themselves, so a direct
# from_numpy conversion is all that is needed.
x = torch.from_numpy(x_data).to(device)
y = torch.from_numpy(y_data).to(device)

# A 2-3-1 network: one hidden layer with three neurons, every layer
# activated with sigmoid (XOR is not linearly separable, so at least
# one hidden layer is required).
model = torch.nn.Sequential(
    torch.nn.Linear(2, 3),
    torch.nn.Sigmoid(),
    torch.nn.Linear(3, 1),
    torch.nn.Sigmoid()
).to(device)

cross_entropy = torch.nn.BCELoss()  # binary cross entropy; targets must be in [0, 1]

opti = torch.optim.Adam(model.parameters(), lr=0.01)

# Train for 2001 full-batch steps, logging the loss every 100 steps.
for i in range(2001):
    opti.zero_grad()  # clear gradients left over from the previous step
    h = model(x)
    loss = cross_entropy(h, y)
    loss.backward()  # gradients accumulate into the parameters' .grad
    opti.step()
    if i % 100 == 0:
        # .item() is the supported way to read a scalar tensor; the
        # deprecated .data attribute bypasses autograd's safety checks.
        print(i, loss.item())

# Compute training-set accuracy. no_grad() skips building an autograd
# graph for this inference-only pass.
with torch.no_grad():
    pre = (model(x) > 0.5).float()  # threshold the sigmoid output at 0.5
    acc = (pre == y).float().mean()
# .item() replaces the deprecated .data attribute for scalar reads.
print(acc.item())

