import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np

# 🔥 Deep Neural Network Model
class DeepNeuralNetwork(nn.Module):
    def __init__(self, input_size, hidden_sizes, output_size):
        super(DeepNeuralNetwork, self).__init__()
        layers = []
        in_size = input_size
        # Stack a Linear + ReLU block for every hidden layer size
        for hidden_size in hidden_sizes:
            layers.append(nn.Linear(in_size, hidden_size))
            layers.append(nn.ReLU())
            in_size = hidden_size
        # Output layer produces raw logits; CrossEntropyLoss applies log-softmax internally
        layers.append(nn.Linear(in_size, output_size))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)  # Pass the input through the sequential stack

# 🔥 Training Function
def train_model(model, criterion, optimizer, x_train, y_train, epochs=100):
    model.train()
    for epoch in range(epochs):
        optimizer.zero_grad()
        # Forward pass
        y_pred = model(x_train)
        # Loss calculation
        loss = criterion(y_pred, y_train)
        # Backward pass and parameter update
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch + 1}/{epochs}], Loss: {loss.item():.4f}')

# ✅ Example Usage
if __name__ == "__main__":
    # 🔥 Sample Data: 100 random samples with 10 features and random binary labels
    x_train = torch.randn(100, 10)  # Inputs do not need requires_grad; only model parameters are optimized
    y_train = torch.randint(0, 2, (100,), dtype=torch.long)  # Class indices as LongTensor, as required by CrossEntropyLoss

    # Plot the first two input features, colored by class label
    plt.scatter(x_train[:, 0].numpy(), x_train[:, 1].numpy(), c=y_train.numpy(), cmap='viridis')
    plt.title('Deep Neural Network Input Data')
    plt.xlabel('Input Feature 1')
    plt.ylabel('Input Feature 2')
    plt.colorbar(label='Output Class')
    plt.show()

    # Initialize Model
    model = DeepNeuralNetwork(input_size=10, hidden_sizes=[20, 10], output_size=2)

    # Criterion and Optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    # Train the model
    train_model(model, criterion, optimizer, x_train, y_train, epochs=100)

    # ✅ Plot the predictions (softmax probabilities, argmax for the class)
    model.eval()
    with torch.no_grad():
        y_pred = torch.softmax(model(x_train), dim=1).numpy()
    plt.scatter(x_train[:, 0].numpy(), x_train[:, 1].numpy(), c=np.argmax(y_pred, axis=1), cmap='viridis')
    plt.title('Deep Neural Network Predictions')
    plt.xlabel('Input Feature 1')
    plt.ylabel('Input Feature 2')
    plt.colorbar(label='Predicted Class')
    plt.show()
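
    # A minimal inference sketch: reuse the trained model on a fresh batch of
    # random samples. The names x_new / new_classes are illustrative assumptions;
    # the calls are the same torch APIs used above.
    x_new = torch.randn(5, 10)  # five hypothetical new samples with 10 features
    model.eval()
    with torch.no_grad():
        new_classes = torch.argmax(model(x_new), dim=1)
    print('Predicted classes for the new samples:', new_classes.tolist())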