from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
import random

# Human-readable CIFAR-10 class names, indexed by integer label id (0-9).
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

class SimplifiedResNet(nn.Module):
    """Simplified ResNet (ResNet-20 layout) for CIFAR-10.

    Architecture: a 3x3 stem conv, then three stages of three residual
    blocks each (two 3x3 convs + identity shortcut per block) at 16, 32
    and 64 channels. A strided 3x3 conv between stages halves the spatial
    size and doubles the channels. Global average pooling and a 10-way
    linear layer produce per-class log-probabilities.

    Input: (N, 3, 32, 32) float tensor. Output: (N, 10) log-softmax.
    """

    def __init__(self):
        super(SimplifiedResNet, self).__init__()
        # Stem: 3x3 conv, stride 1, padding 1 -> 32x32x16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)

        # Stage 1: 6 convs (3 residual blocks of two 3x3 convs each); shape stays 32x32x16
        self.stage1_conv1 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1_conv1 = nn.BatchNorm2d(16)
        self.stage1_conv2 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1_conv2 = nn.BatchNorm2d(16)
        self.stage1_conv3 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1_conv3 = nn.BatchNorm2d(16)
        self.stage1_conv4 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1_conv4 = nn.BatchNorm2d(16)
        self.stage1_conv5 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1_conv5 = nn.BatchNorm2d(16)
        self.stage1_conv6 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1_conv6 = nn.BatchNorm2d(16)

        # Downsample: stride 2 -> 16x16x32
        self.downsample1 = nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(32)

        # Stage 2: 6 convs (3 residual blocks); shape stays 16x16x32
        self.stage2_conv1 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2_conv1 = nn.BatchNorm2d(32)
        self.stage2_conv2 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2_conv2 = nn.BatchNorm2d(32)
        self.stage2_conv3 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2_conv3 = nn.BatchNorm2d(32)
        self.stage2_conv4 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2_conv4 = nn.BatchNorm2d(32)
        self.stage2_conv5 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2_conv5 = nn.BatchNorm2d(32)
        self.stage2_conv6 = nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2_conv6 = nn.BatchNorm2d(32)

        # Downsample: stride 2 -> 8x8x64
        self.downsample2 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(64)

        # Stage 3: 6 convs (3 residual blocks); shape stays 8x8x64
        self.stage3_conv1 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn3_conv1 = nn.BatchNorm2d(64)
        self.stage3_conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn3_conv2 = nn.BatchNorm2d(64)
        self.stage3_conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn3_conv3 = nn.BatchNorm2d(64)
        self.stage3_conv4 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn3_conv4 = nn.BatchNorm2d(64)
        self.stage3_conv5 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn3_conv5 = nn.BatchNorm2d(64)
        self.stage3_conv6 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn3_conv6 = nn.BatchNorm2d(64)

        # Global average pooling -> 1x1x64
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))

        # Classifier head -> 10 classes
        self.fc = nn.Linear(64, 10)

    def _res_block(self, x, conv_a, bn_a, conv_b, bn_b):
        """Basic residual block: ReLU(BN_b(conv_b(ReLU(BN_a(conv_a(x))))) + x).

        Post-activation form: identity shortcut is added first, ReLU applied
        after the addition (as in the original ResNet paper's basic block).
        """
        out = F.relu(bn_a(conv_a(x)))
        out = bn_b(conv_b(out))
        return F.relu(out + x)

    def forward(self, x):
        # BUG FIX: the previous forward() applied only the BatchNorm layers
        # (e.g. self.bn1_conv1(out)) and never called any stage*_conv* layer,
        # so the residual branches contained no convolutions at all. Each
        # block now applies conv -> BN as intended via _res_block.
        out = F.relu(self.bn1(self.conv1(x)))  # 32x32x16

        # Stage 1: three residual blocks at 16 channels.
        out = self._res_block(out, self.stage1_conv1, self.bn1_conv1,
                              self.stage1_conv2, self.bn1_conv2)
        out = self._res_block(out, self.stage1_conv3, self.bn1_conv3,
                              self.stage1_conv4, self.bn1_conv4)
        out = self._res_block(out, self.stage1_conv5, self.bn1_conv5,
                              self.stage1_conv6, self.bn1_conv6)

        out = F.relu(self.bn2(self.downsample1(out)))  # 16x16x32

        # Stage 2: three residual blocks at 32 channels.
        out = self._res_block(out, self.stage2_conv1, self.bn2_conv1,
                              self.stage2_conv2, self.bn2_conv2)
        out = self._res_block(out, self.stage2_conv3, self.bn2_conv3,
                              self.stage2_conv4, self.bn2_conv4)
        out = self._res_block(out, self.stage2_conv5, self.bn2_conv5,
                              self.stage2_conv6, self.bn2_conv6)

        out = F.relu(self.bn3(self.downsample2(out)))  # 8x8x64

        # Stage 3: three residual blocks at 64 channels.
        out = self._res_block(out, self.stage3_conv1, self.bn3_conv1,
                              self.stage3_conv2, self.bn3_conv2)
        out = self._res_block(out, self.stage3_conv3, self.bn3_conv3,
                              self.stage3_conv4, self.bn3_conv4)
        out = self._res_block(out, self.stage3_conv5, self.bn3_conv5,
                              self.stage3_conv6, self.bn3_conv6)

        out = self.global_pool(out)            # (N, 64, 1, 1)
        out = out.view(out.size(0), -1)        # (N, 64)
        out = self.fc(out)                     # (N, 10)
        return F.log_softmax(out, dim=1)

def train(args, model, device, train_loader, optimizer, epoch, train_losses, train_accs):
    """Run one training epoch.

    Appends the mean per-batch loss to ``train_losses`` and the epoch
    accuracy (percent) to ``train_accs``; logs progress every
    ``args.log_interval`` batches.
    """
    model.train()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0
    n_batches = len(train_loader)
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        running_loss += loss.item()
        loss.backward()
        optimizer.step()

        # Track running accuracy over the epoch.
        predictions = output.argmax(dim=1, keepdim=True)
        n_correct += predictions.eq(target.view_as(predictions)).sum().item()
        n_seen += target.size(0)

        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / n_batches, loss.item()))
    train_losses.append(running_loss / n_batches)
    epoch_acc = 100. * n_correct / n_seen
    train_accs.append(epoch_acc)
    print(f'Train Accuracy for Epoch {epoch}: {epoch_acc:.2f}%')

def test(args, model, device, test_loader, test_losses, test_accs):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    test_acc = 100. * correct / len(test_loader.dataset)
    test_accs.append(test_acc)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), test_acc))
    return test_loss

def visualize_prediction(model, device, test_loader, num_images=9):
    """Show a 3x3 grid of random test images titled with predicted vs. true class.

    Saves the figure to 'prediction_visualization.png'. Images are
    de-normalized with the CIFAR-10 mean/std used by the data transforms.
    """
    model.eval()
    dataset = test_loader.dataset
    chosen = random.sample(range(len(dataset)), num_images)
    fig, axes = plt.subplots(3, 3, figsize=(8, 8))
    axes = axes.ravel()

    for i, idx in enumerate(chosen):
        image, label = dataset[idx]
        with torch.no_grad():
            logits = model(image.unsqueeze(0).to(device))
        pred = logits.argmax(dim=1, keepdim=True).item()
        pred_class = class_names[pred]
        true_class = class_names[label]
        # CHW tensor -> HWC array, then undo normalization and clamp to [0, 1].
        img = image.numpy().transpose((1, 2, 0))
        img = img * [0.2023, 0.1994, 0.2010] + [0.4914, 0.4822, 0.4465]
        img = np.clip(img, 0, 1)
        axes[i].imshow(img)
        axes[i].set_title(f'Pred: {pred_class}, Label: {true_class}', fontsize=10, pad=5)
        axes[i].axis('off')
    plt.tight_layout()
    plt.savefig('prediction_visualization.png')
    plt.show()

def plot_losses(epochs, train_losses, test_losses):
    """Plot train/test loss per epoch and save to 'loss_plot.png'."""
    print(f"Train losses length: {len(train_losses)}, Test losses length: {len(test_losses)}")
    plt.figure(figsize=(10, 6))
    for series, tag in ((train_losses, 'Train Loss'), (test_losses, 'Test Loss')):
        plt.plot(epochs, series, label=tag)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training and Test Loss over Epochs')
    plt.legend()
    plt.grid(True)
    plt.savefig('loss_plot.png')
    plt.show()

def plot_accuracies(epochs, train_accs, test_accs):
    """Plot train/test accuracy per epoch and save to 'accuracy_plot.png'."""
    print(f"Train accs length: {len(train_accs)}, Test accs length: {len(test_accs)}")
    plt.figure(figsize=(10, 6))
    for series, tag in ((train_accs, 'Train Accuracy'), (test_accs, 'Test Accuracy')):
        plt.plot(epochs, series, label=tag)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Training and Test Accuracy over Epochs')
    plt.legend()
    plt.grid(True)
    plt.savefig('accuracy_plot.png')
    plt.show()

def main():
    """Parse CLI args, train SimplifiedResNet on CIFAR-10, and save plots/model.

    Side effects: downloads CIFAR-10 to ./cifar10_data/, writes the model
    checkpoint, a best-accuracy text file, and three PNG figures.
    """
    parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Example with Simplified ResNet with Residual')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=200, metavar='N',  # changed to 200
                        help='number of epochs to train (default: 200)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    # NOTE(review): with action='store_true' and default=True this flag can
    # never be turned off from the CLI — confirm whether that is intended.
    parser.add_argument('--save-model', action='store_true', default=True,  # save model by default
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # CIFAR-10 dataset loading; downloaded automatically if missing.
    # Training uses flip/crop augmentation; both splits are normalized with
    # the standard CIFAR-10 per-channel mean/std.
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, padding=4),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])

    # The train-split download fetches the full archive, which also contains
    # the test split, so the test loader does not pass download=True.
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('./cifar10_data/', train=True, download=True, transform=transform_train),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('./cifar10_data/', train=False, transform=transform_test),
        batch_size=args.test_batch_size, shuffle=False, **kwargs)

    model = SimplifiedResNet().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # Reduce LR by 10x when the test loss plateaus for 3 epochs.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=3)

    # Per-epoch loss and accuracy history for plotting.
    train_losses = []
    test_losses = []
    train_accs = []
    test_accs = []
    epochs = list(range(1, args.epochs + 1))

    # Track the best test accuracy seen so far.
    best_test_acc = 0.0

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch, train_losses, train_accs)
        test_loss = test(args, model, device, test_loader, test_losses, test_accs)
        scheduler.step(test_loss)

        # Update the best test accuracy.
        current_test_acc = test_accs[-1]
        if current_test_acc > best_test_acc:
            best_test_acc = current_test_acc

    if args.save_model:
        torch.save(model.state_dict(), "cifar10_simplified_resnet_with_residual.pt")

    # Persist the best test accuracy to a file named after the epoch count.
    with open(f"{args.epochs}.txt", "w") as f:
        f.write(f"{best_test_acc:.2f}")

    visualize_prediction(model, device, test_loader, num_images=9)

    # Generate loss/accuracy figures.
    plot_losses(epochs, train_losses, test_losses)
    plot_accuracies(epochs, train_accs, test_accs)

# Standard script entry point.
if __name__ == '__main__':
    main()