import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
from torchvision.models import resnet18
from torchvision.models.resnet import ResNet18_Weights
import time

# Print environment information (PyTorch / torchvision versions, CUDA status)
print(f"PyTorch version: {torch.__version__}")
print(f"Torchvision version: {torchvision.__version__}")
print(f"CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"CUDA device: {torch.cuda.get_device_name(0)}")
    print(f"CUDA memory: {torch.cuda.get_device_properties(0).total_memory / 1024 ** 3:.2f} GB")

# Fix the random seed so results are reproducible
torch.manual_seed(42)

# Data augmentation and preprocessing pipeline.
# NOTE(review): this augmented pipeline (RandomCrop / RandomHorizontalFlip) is
# also passed to the TEST dataset in main(); evaluation should normally use a
# deterministic transform — confirm whether augmenting test data is intentional.
transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    # Maps each channel from [0, 1] to [-1, 1]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# CIFAR-10 class names, indexed by integer label
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')


# Simplified ResNet18 to reduce the memory footprint
class CustomResNet18(nn.Module):
    """ResNet-18 backbone with ImageNet weights adapted to a 10-class head.

    The earliest parameter tensors of the pretrained backbone are frozen to
    cut gradient computation, and the original fully-connected head is
    replaced by dropout followed by a fresh linear layer sized for
    ``num_classes``.
    """

    def __init__(self, num_classes=10):
        super().__init__()
        print("加载预训练的ResNet18模型...")
        backbone = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)

        # Freeze the first few parameter tensors to reduce training cost
        for frozen in list(backbone.parameters())[:10]:
            frozen.requires_grad = False

        # Reuse the pretrained feature extractor stage by stage
        self.conv1 = backbone.conv1
        self.bn1 = backbone.bn1
        self.relu = backbone.relu
        self.maxpool = backbone.maxpool
        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4
        self.avgpool = backbone.avgpool
        # New classification head: linear layer preceded by dropout
        self.fc = nn.Linear(backbone.fc.in_features, num_classes)
        self.dropout = nn.Dropout(0.5)

        print(f"模型参数数量: {sum(p.numel() for p in self.parameters() if p.requires_grad):,}")

    def forward(self, x):
        """Run the backbone stages, flatten, then apply dropout and the head."""
        for stage in (self.conv1, self.bn1, self.relu, self.maxpool,
                      self.layer1, self.layer2, self.layer3, self.layer4,
                      self.avgpool):
            x = stage(x)
        x = x.view(x.size(0), -1)
        return self.fc(self.dropout(x))


def _train_one_epoch(model, loader, criterion, optimizer, device):
    """Train `model` for one pass over `loader`.

    Returns (avg_loss, accuracy_percent) for the epoch.
    """
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0

    for batch_idx, (images, labels) in enumerate(loader):
        images, labels = images.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += predicted.eq(labels).sum().item()

        # Print progress every 100 batches
        if (batch_idx + 1) % 100 == 0:
            batch_progress = (batch_idx + 1) / len(loader) * 100
            avg_loss = running_loss / (batch_idx + 1)
            acc = 100. * correct / total
            print(
                f"Batch {batch_idx + 1}/{len(loader)} ({batch_progress:.1f}%) | Loss: {avg_loss:.4f} | Acc: {acc:.2f}%")

    return running_loss / len(loader), 100. * correct / total


def _validate(model, loader, criterion, device):
    """Evaluate `model` over `loader` without gradients.

    Returns (avg_loss, accuracy_percent).
    """
    model.eval()
    val_loss = 0.0
    val_correct = 0
    val_total = 0

    with torch.no_grad():
        for batch_idx, (images, labels) in enumerate(loader):
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            val_loss += criterion(outputs, labels).item()

            _, predicted = outputs.max(1)
            val_total += labels.size(0)
            val_correct += predicted.eq(labels).sum().item()

            # Print progress every 50 batches
            if (batch_idx + 1) % 50 == 0:
                print(f"验证 Batch {batch_idx + 1}/{len(loader)}")

    return val_loss / len(loader), 100. * val_correct / val_total


def _per_class_accuracy(model, loader, device):
    """Count per-class hits over `loader`; returns (class_correct, class_total).

    BUG FIX: the original `c = (predicted == labels).squeeze()` collapsed a
    size-1 final batch to a 0-dim tensor, making `len(c)` raise TypeError.
    Iterating the un-squeezed comparison handles every batch size.
    """
    model.eval()
    class_correct = [0] * 10
    class_total = [0] * 10

    with torch.no_grad():
        for images, labels in loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = outputs.max(1)

            for label, hit in zip(labels, predicted.eq(labels)):
                class_correct[label.item()] += hit.item()
                class_total[label.item()] += 1

    return class_correct, class_total


def main():
    """Fine-tune ResNet-18 on CIFAR-10 and report per-class test accuracy."""
    try:
        # BUG FIX: the module-level `transform` performs random augmentation
        # (RandomCrop / RandomHorizontalFlip); applying it to the test set
        # corrupts evaluation. The test set gets a deterministic pipeline
        # with the same normalization instead.
        eval_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        # Load datasets (downloads on first run)
        print("加载训练集...")
        train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                                     download=True, transform=transform)
        print("加载测试集...")
        test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                                    download=True, transform=eval_transform)

        print(f"训练集大小: {len(train_dataset)} 张图像")
        print(f"测试集大小: {len(test_dataset)} 张图像")

        # Small batch size keeps peak memory low
        batch_size = 32
        print(f"使用batch size: {batch_size}")

        # num_workers=0 avoids multiprocessing worker issues on Windows
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
                                                   shuffle=True, num_workers=0)
        test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,
                                                  shuffle=False, num_workers=0)

        # Device selection: prefer CUDA when available
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f"使用设备: {device}")

        model = CustomResNet18().to(device)

        # Loss, optimizer, and LR scheduler.
        # NOTE: the `verbose=True` kwarg was deprecated and later removed from
        # ReduceLROnPlateau; dropping it keeps the script running on current
        # PyTorch releases (only the LR-change log line is lost).
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.001)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3)

        num_epochs = 20
        best_accuracy = 0.0
        print(f"开始训练，共 {num_epochs} 个epochs")

        for epoch in range(num_epochs):
            start_time = time.time()

            print(f"\nEpoch {epoch + 1}/{num_epochs}")
            print("-" * 50)
            print("训练中...")
            avg_train_loss, train_accuracy = _train_one_epoch(
                model, train_loader, criterion, optimizer, device)

            print("验证中...")
            avg_val_loss, val_accuracy = _validate(model, test_loader, criterion, device)

            # ReduceLROnPlateau monitors the validation loss
            scheduler.step(avg_val_loss)

            epoch_time = time.time() - start_time

            print(f"\nEpoch {epoch + 1}/{num_epochs} 完成 | 耗时: {epoch_time:.1f}s")
            print(f"训练 Loss: {avg_train_loss:.4f} | 训练 Acc: {train_accuracy:.2f}%")
            print(f"验证 Loss: {avg_val_loss:.4f} | 验证 Acc: {val_accuracy:.2f}%")

            # Checkpoint only the best-performing epoch
            if val_accuracy > best_accuracy:
                best_accuracy = val_accuracy
                torch.save(model.state_dict(), './best_optimized_cnn.pth')
                print(f"保存最佳模型 (准确率: {best_accuracy:.2f}%)")

        # Final evaluation with the best checkpoint.
        # map_location makes the checkpoint loadable regardless of the device
        # it was saved from.
        print("\n执行最终测试...")
        model.load_state_dict(torch.load('./best_optimized_cnn.pth', map_location=device))
        class_correct, class_total = _per_class_accuracy(model, test_loader, device)

        # Per-class accuracy breakdown
        print("\n各类别的准确率:")
        for i in range(10):
            print(f'{classes[i]:<8}: {100 * class_correct[i] / class_total[i]:.2f}%')

        print(f"\n最终模型在测试集上的准确率: {best_accuracy:.2f}%")
        print("模型已保存至 ./best_optimized_cnn.pth")

    except Exception as e:
        # Top-level script boundary: report the error and full traceback
        print(f"程序执行出错: {str(e)}")
        import traceback
        traceback.print_exc()


# Entry guard; freeze_support() is needed for frozen Windows executables that
# use multiprocessing and is a no-op everywhere else.
if __name__ == '__main__':
    mp.freeze_support()
    main()