import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torch.utils.data import DataLoader, TensorDataset

# Fix the random seeds so experiments are reproducible
torch.manual_seed(42)
np.random.seed(42)


# 1. Define the convolutional neural network model
class SimpleCNN(nn.Module):
    """A small two-stage CNN classifier.

    Architecture: two [Conv 3x3 (padding=1) -> ReLU -> MaxPool 2x2] stages
    followed by two fully connected layers. Defaults reproduce the original
    hard-coded setup (3-channel 32x32 input, 10 classes).

    Args:
        in_channels: number of input image channels (default 3 for RGB).
        num_classes: size of the output logit vector (default 10).
        image_size: spatial side length of the (square) input images.
            Must be divisible by 4, since two 2x2 poolings each halve
            the spatial size. Default 32 -> 8x8 feature maps before fc1.
    """

    def __init__(self, in_channels=3, num_classes=10, image_size=32):
        super().__init__()

        if image_size % 4 != 0:
            raise ValueError(f"image_size must be divisible by 4, got {image_size}")

        # Conv layer 1: padding=1 keeps the spatial size unchanged.
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=16,
                               kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        # Max pooling: 2x2 window, stride 2 -> halves the spatial size.
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

        # Conv layer 2: 16 -> 32 channels, spatial size again preserved.
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32,
                               kernel_size=3, padding=1)

        # After two poolings each spatial dim is image_size / 4
        # (was hard-coded as 8 for the 32x32 case).
        feat = image_size // 4
        self.fc1 = nn.Linear(32 * feat * feat, 128)
        self.fc2 = nn.Linear(128, num_classes)

    def forward(self, x):
        """Compute class logits for a batch of images.

        Args:
            x: float tensor of shape (batch, in_channels, image_size, image_size).

        Returns:
            Logit tensor of shape (batch, num_classes); no softmax applied
            (CrossEntropyLoss expects raw logits).
        """
        # Stage 1: conv + ReLU + pool
        x = self.conv1(x)
        x = self.relu(x)
        x = self.pool(x)

        # Stage 2: conv + ReLU + pool
        x = self.conv2(x)
        x = self.relu(x)
        x = self.pool(x)

        # Flatten feature maps for the fully connected head.
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)

        return x


# 2. Generate random data
def generate_random_data(num_samples=1000, image_size=32):
    """Build a synthetic labeled image dataset.

    Args:
        num_samples: number of (image, label) pairs to generate.
        image_size: spatial side length of each square RGB image.

    Returns:
        Tuple of (images, labels): images is a float tensor of shape
        (num_samples, 3, image_size, image_size) with values in [0, 1);
        labels is an int64 tensor of shape (num_samples,) with values in [0, 10).
    """
    # Uniform-random "images" in [0, 1), channels-first layout.
    shape = (num_samples, 3, image_size, image_size)
    images = torch.rand(*shape)
    # Uniform-random class labels for a 10-way classification task.
    labels = torch.randint(0, 10, (num_samples,))
    return images, labels


# 3. Training function
def train_model(model, train_loader, criterion, optimizer, num_epochs=5):
    """Train *model* on *train_loader* for *num_epochs* epochs.

    Args:
        model: an nn.Module producing logits for criterion.
        train_loader: iterable of (inputs, labels) batches.
        criterion: loss callable, e.g. nn.CrossEntropyLoss().
        optimizer: torch optimizer over model.parameters().
        num_epochs: number of full passes over the data.

    Returns:
        list[float]: mean training loss for each epoch (new, backward-
        compatible — the original returned None and callers ignore it).

    Fix: the original only printed loss every 100th batch, so with fewer
    than 100 batches per epoch (e.g. 1000 samples at batch size 32) no
    loss was ever reported. A per-epoch average is now always printed
    and returned.
    """
    model.train()  # enable training-mode behavior (dropout, batchnorm, ...)
    epoch_losses = []
    for epoch in range(num_epochs):
        running_loss = 0.0
        epoch_loss = 0.0
        num_batches = 0
        for i, (inputs, labels) in enumerate(train_loader):
            # Clear gradients from the previous step.
            optimizer.zero_grad()

            # Forward pass.
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # Backward pass + parameter update.
            loss.backward()
            optimizer.step()

            # Accumulate statistics.
            batch_loss = loss.item()
            running_loss += batch_loss
            epoch_loss += batch_loss
            num_batches += 1
            if i % 100 == 99:  # periodic progress report within long epochs
                print(f"Epoch [{epoch + 1}/{num_epochs}], Batch [{i + 1}], Loss: {running_loss / 100:.4f}")
                running_loss = 0.0

        # Always report the epoch average, even for short epochs.
        avg_loss = epoch_loss / max(num_batches, 1)
        epoch_losses.append(avg_loss)
        print(f"Epoch [{epoch + 1}/{num_epochs}] average loss: {avg_loss:.4f}")
    return epoch_losses


# 4. Evaluation function
def model_test(model, test_loader):
    """Evaluate top-1 classification accuracy of *model* on *test_loader*.

    Args:
        model: an nn.Module producing (batch, num_classes) logits.
        test_loader: iterable of (inputs, labels) batches.

    Returns:
        float: accuracy as a percentage in [0, 100] (new, backward-
        compatible — the original returned None). 0.0 for an empty loader.

    Fixes: returns the accuracy instead of only printing it, and guards
    against ZeroDivisionError when the loader yields no samples.
    """
    model.eval()  # evaluation mode (disable dropout, freeze batchnorm stats)
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed for inference
        for inputs, labels in test_loader:
            outputs = model(inputs)
            # Predicted class = index of the max logit per sample.
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total if total else 0.0
    print(f"Test Accuracy: {accuracy:.2f}%")
    return accuracy


# 5. Main entry point
def main():
    """Run the end-to-end experiment: data, loaders, model, training, evaluation."""
    # Synthetic train/test splits (no real corpus in this demo).
    train_images, train_labels = generate_random_data(num_samples=1000)
    test_images, test_labels = generate_random_data(num_samples=200)

    # Wrap tensors in DataLoaders; only the training loader shuffles.
    train_loader = DataLoader(
        TensorDataset(train_images, train_labels), batch_size=32, shuffle=True
    )
    test_loader = DataLoader(
        TensorDataset(test_images, test_labels), batch_size=32, shuffle=False
    )

    # Model, multi-class loss, and Adam optimizer.
    model = SimpleCNN()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Train, then evaluate.
    print("Starting training...")
    train_model(model, train_loader, criterion, optimizer, num_epochs=5)

    print("\nTesting model...")
    model_test(model, test_loader)


if __name__ == "__main__":
    main()