import torch
import torch.nn as nn
import torch.optim as optim

# Train on the GPU when one is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class Net(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # One hidden layer with ReLU, and a sigmoid output for binary classification.
        self.hidden = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.output = nn.Linear(hidden_size, output_size)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Linear -> ReLU -> Linear -> Sigmoid yields a probability in (0, 1).
        hidden = self.hidden(x)
        relu = self.relu(hidden)
        output = self.output(relu)
        output = self.sigmoid(output)
        return output

# Hyperparameters.
learning_rate = 0.01
num_epochs = 1000

# Instantiate the network (2 inputs, 5 hidden units, 1 output) and move it to the device.
model = Net(input_size=2, hidden_size=5, output_size=1)
model.to(device)

# Binary cross-entropy loss pairs with the sigmoid output layer.
criterion = nn.BCELoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)

# The four XOR input/target pairs serve as the entire training set.
X_train = torch.tensor([[0, 0], [0, 1], [1, 0], [1, 1]]).float().to(device)
y_train = torch.tensor([[0], [1], [1], [0]]).float().to(device)

for epoch in range(num_epochs):
    # Forward pass over the full XOR batch.
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    # Backward pass: clear stale gradients, backpropagate, update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Report the loss every 100 epochs.
    if (epoch + 1) % 100 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')

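# Optional sanity check (a minimal sketch): threshold the sigmoid outputs at
# 0.5 and compare the predictions with the XOR targets.
with torch.no_grad():
    predicted = (model(X_train) > 0.5).float()
    print('Predicted:', predicted.squeeze().tolist())
    print('Target:   ', y_train.squeeze().tolist())
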
# Persist the trained weights for later reuse.
torch.save(model.state_dict(), 'trained_model.pt')
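
# A minimal sketch of reloading the saved weights for inference; it assumes the
# Net class above and the 'trained_model.pt' file written by torch.save.
restored = Net(input_size=2, hidden_size=5, output_size=1).to(device)
restored.load_state_dict(torch.load('trained_model.pt', map_location=device))
restored.eval()
with torch.no_grad():
    print(restored(X_train))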