import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import numpy as np

# Synthetic regression data: `num_samples` random feature vectors of
# length `dimension`.
num_samples = 2000
dimension = 1280

X = np.random.randn(num_samples, dimension)

# The target is a noisy linear transform of the input: Y = XW + b + noise.
W = np.random.randn(dimension, dimension) * 0.1
b = np.random.randn(dimension) * 0.1
noise = np.random.randn(num_samples, dimension) * 0.05
Y = X @ W + b + noise
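
# Note: with noise drawn at std 0.05, the irreducible per-coordinate MSE is
# 0.05 ** 2 = 0.0025, a useful floor when reading the training loss below.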

# Wrap the arrays as float32 tensors and batch them for training.
X_tensor = torch.tensor(X, dtype=torch.float32)
Y_tensor = torch.tensor(Y, dtype=torch.float32)
dataset = TensorDataset(X_tensor, Y_tensor)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)
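
# Optional sanity check (added here; `xb`/`yb` are illustrative names, not
# part of the original script): the first batch should be 32 x dimension.
xb, yb = next(iter(dataloader))
assert xb.shape == (32, dimension) and yb.shape == (32, dimension)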


class LinearModel(nn.Module):
    """Linear baseline: a single fully connected layer, matching the linear
    process that generated the data."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(dimension, dimension)

    def forward(self, x):
        return self.linear(x)


class NeuralNetwork(nn.Module):
    """Two-hidden-layer ReLU MLP mapping `dimension` inputs to `dimension`
    outputs."""

    def __init__(self, hidden_dim=512):
        super().__init__()
        self.network = nn.Sequential(
            nn.Linear(dimension, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, dimension),
        )

    def forward(self, x):
        return self.network(x)


model = NeuralNetwork()
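# To train the linear baseline instead, swap in: model = LinearModel()
# (a suggestion based on the unused class above, not part of the original run).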

# Mean-squared-error loss with Adam at its default learning rate of 1e-3.
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Standard training loop: forward pass, loss, backprop, parameter update.
num_epochs = 50
for epoch in range(num_epochs):
    total_loss = 0.0
    for inputs, targets in dataloader:
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # Reset gradients before backprop so they do not accumulate across batches.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    # Report the mean per-batch loss every 5 epochs.
    if (epoch + 1) % 5 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {total_loss/len(dataloader):.4f}')
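
# Post-training check (a sketch added here, not in the original script):
# measure the full-dataset MSE for comparison against the ~0.0025 noise floor.
# The 512-unit bottleneck limits how exactly the MLP can represent a
# full-rank 1280x1280 linear map, so some gap above the floor is expected.
model.eval()
with torch.no_grad():
    full_mse = criterion(model(X_tensor), Y_tensor).item()
print(f'Full-dataset MSE: {full_mse:.4f}')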


def predict(input_vector):
    """Run the trained model on a NumPy array and return a NumPy array."""
    model.eval()
    with torch.no_grad():
        input_tensor = torch.tensor(input_vector, dtype=torch.float32)
        return model(input_tensor).numpy()
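
# Example usage (illustrative; `sample` is a made-up input, not from the
# original script): a single 1280-dimensional vector in, one out.
sample = np.random.randn(dimension)
print(predict(sample).shape)  # (1280,)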