import torch  
from torch import nn  
from torch.utils.data import DataLoader, TensorDataset  
  
# Example training data: X_train holds the input vectors, y_train the labels.
X_train = torch.randn(100, 10)  # 100 samples, 10 features each
y_train = torch.randint(low=0, high=2, size=(100,))  # binary targets in {0, 1}

# Wrap the tensors in a Dataset and batch/shuffle them with a DataLoader.
train_dataset = TensorDataset(X_train, y_train)
train_loader = DataLoader(dataset=train_dataset, batch_size=32, shuffle=True)
  
# 定义模型  
class SmallModel(nn.Module):
    """Tiny two-layer MLP classifier: 10 inputs -> 5 hidden units -> 2 raw logits."""

    def __init__(self):
        super().__init__()
        # Attribute names fc1/fc2 are kept so saved state_dicts remain compatible.
        self.fc1 = nn.Linear(10, 5)  # input layer -> hidden layer
        self.fc2 = nn.Linear(5, 2)   # hidden layer -> output logits

    def forward(self, x):
        # ReLU nonlinearity between the two affine layers; no softmax here —
        # CrossEntropyLoss expects unnormalized logits.
        hidden = torch.relu(self.fc1(x))
        return self.fc2(hidden)
  
model = SmallModel()

# Cross-entropy over the two output logits (standard for classification),
# optimized with Adam.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
  
# Train the model.
num_epochs = 10  # number of full passes over the training set
model.train()  # explicit training mode (matters if dropout/batch-norm is added later)
for epoch in range(num_epochs):
    running_loss = 0.0  # accumulate per-batch losses for an epoch-level summary
    num_batches = 0
    for inputs, labels in train_loader:
        # Forward pass: compute logits and the mini-batch loss.
        outputs = model(inputs)
        loss = criterion(outputs, labels)

        # Backward pass and parameter update.
        optimizer.zero_grad()  # clear gradients from the previous step
        loss.backward()        # backpropagate to compute fresh gradients
        optimizer.step()       # apply the Adam update

        running_loss += loss.item()
        num_batches += 1

    # Report the mean loss over all batches — the original printed only the
    # LAST batch's loss, which is a noisy/misleading epoch summary.
    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {running_loss / num_batches:.4f}')
  
# Optionally persist the trained weights; reload later with model.load_state_dict().
checkpoint_path = 'small_model.pth'
torch.save(model.state_dict(), checkpoint_path)