import torch
import torch.nn as nn
import torch.optim as optim


# A minimal classifier: a single affine layer mapping features to class logits.
class SimpleNet(nn.Module):
    """Single-layer linear classifier producing raw (unnormalized) logits."""

    def __init__(self, input_size, num_classes):
        super().__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        """Map a (batch, input_size) tensor to (batch, num_classes) logits."""
        return self.linear(x)


# Problem dimensions for this demo.
input_size = 10  # dimensionality of each input feature vector
num_classes = 3  # number of target classes

# Instantiate the classifier.
model = SimpleNet(input_size, num_classes)

# Loss and optimizer: cross-entropy on logits, plain SGD over all parameters.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)


# Synthetic-data generator, standing in for a real DataLoader in this example.
def generate_data(batch_size, input_size, num_classes):
    """Return a random (features, labels) batch for demonstration purposes."""
    features = torch.randn(batch_size, input_size)
    labels = torch.randint(low=0, high=num_classes, size=(batch_size,))
    return features, labels


# Training loop: each epoch performs 100 optimization steps on freshly
# sampled random batches of 100 examples.
num_epochs = 10
for epoch in range(num_epochs):
    for _ in range(100):
        # Draw a synthetic mini-batch (stand-in for a real data pipeline).
        batch_inputs, batch_targets = generate_data(100, input_size, num_classes)

        # Forward pass and loss computation.
        predictions = model(batch_inputs)
        loss = criterion(predictions, batch_targets)

        # Backward pass: clear stale gradients, backpropagate, take one SGD step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Report the loss of the final batch seen in this epoch.
    print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}")


# 测试模型
def test_model(model, test_inputs, test_targets):
    model.eval()  # 将模型设置为评估模式
    with torch.no_grad():  # 禁用梯度计算
        outputs = model(test_inputs)
        predicted = outputs.argmax(dim=1, keepdim=True)
        correct = predicted.eq(test_targets.view_as(predicted)).sum().item()
        accuracy = correct / test_targets.size(0)
        print(f"Test Accuracy: {accuracy:.4f}")


# Evaluate on a fresh random batch. Labels are random, so accuracy should
# hover around chance level (about 1 / num_classes).
test_inputs, test_targets = generate_data(100, input_size, num_classes)
test_model(model, test_inputs, test_targets)

if __name__ == '__main__':
    # NOTE(review): this guard protects only the print below — all of the
    # training and evaluation above still runs on import. Consider wrapping
    # the script body in a main() function.
    print("end")
