import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from models import *
from utils import progress_bar
import yaml

# Pin the process to GPU 0; must be set before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# Gaussian-noise helper used by the forward hooks below.
def add_gaussian_noise(tensor, mean=0.0, std=0.05):
    """Return a copy of `tensor` with elementwise N(mean, std^2) noise added.

    The input tensor itself is left unmodified.
    """
    perturbation = torch.randn_like(tensor) * std + mean
    return tensor + perturbation

# Forward hook: injects Gaussian noise while the module is in training mode.
def hook_function(module, input, output):
    """Perturb weights and activations during training.

    In training mode this hook (a) adds a small in-place Gaussian
    perturbation (std=1e-4) to the layer's weight tensor — note this is
    cumulative across forward passes — and (b) returns the activations
    with additive noise (std=0.04), which replaces the layer's output.
    In eval mode the output passes through untouched.
    """
    if not module.training:  # inference: leave weights and output untouched
        return output
    weight = getattr(module, "weight", None)
    if weight is not None:
        module.weight.data = weight.data + (torch.randn_like(weight.data) * 0.0001 + 0.0)
    return output + (torch.randn_like(output) * 0.04 + 0.0)

# Walk the model and attach the noise-injection hook to selected layers.
def register_hooks_for_layers(model, layer_types):
    """Register the noise-injection forward hook on matching layers.

    :param model: PyTorch model to instrument
    :param layer_types: tuple of layer classes, e.g. (nn.Conv2d, nn.Linear)
    """
    targets = (
        (name, module)
        for name, module in model.named_modules()
        if isinstance(module, layer_types)
    )
    for name, module in targets:
        print(f"Registering hook for layer: {name} ({type(module).__name__})")
        module.register_forward_hook(hook_function)

def train(net, trainloader, device, optimizer, criterion, epoch):
    """Run one training epoch, reporting running loss/accuracy via progress_bar."""
    print(f'\nEpoch: {epoch}')
    net.train()
    running_loss = 0.0
    hits = 0
    seen = 0
    for step, (images, labels) in enumerate(trainloader):
        images = images.to(device)
        labels = labels.to(device)

        # Standard supervised step: forward, loss, backward, update.
        optimizer.zero_grad()
        logits = net(images)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        preds = logits.max(1)[1]
        seen += labels.size(0)
        hits += (preds == labels).sum().item()

        progress_bar(step, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                        (running_loss / (step + 1), 100. * hits / seen, hits, seen))

def test(net, testloader, device, criterion):
    """Evaluate `net` on the test set and return accuracy as a percentage."""
    net.eval()
    loss_sum = 0.0
    hits = 0
    seen = 0
    with torch.no_grad():  # inference only: no gradients needed
        for step, (images, labels) in enumerate(testloader):
            images, labels = images.to(device), labels.to(device)
            logits = net(images)
            loss_sum += criterion(logits, labels).item()

            preds = logits.max(1)[1]
            seen += labels.size(0)
            hits += (preds == labels).sum().item()

            progress_bar(step, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                            (loss_sum / (step + 1), 100. * hits / seen, hits, seen))

    acc = 100. * hits / seen
    print(f'The acc: {acc}')
    return acc

def main():
    """Train ResNet18 on CIFAR10 with Gaussian noise injected into Conv/Linear layers.

    Loads a pretrained float checkpoint (if --resume points to one), registers
    noise-injection forward hooks, then fine-tunes for --epoch epochs, saving
    the best-accuracy checkpoint to ./checkpoint/ckpt_float_noise.pth.
    """
    parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
    parser.add_argument('--epoch', default=20, type=int, help='epoch')
    parser.add_argument('--dataroot', default='./dataset',
                        type=str, help='dataset root')
    parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
    parser.add_argument('--resume', default='./checkpoint/ckpt_float_95.15.pth',
                        type=str, help='resume from checkpoint')
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    best_acc = 0     # best test accuracy observed so far
    start_epoch = 0  # checkpoint epoch intentionally not restored (see below)

    print('==> Preparing data..')
    # Standard CIFAR10 augmentation and per-channel normalization statistics.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(
        root=args.dataroot, train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=128, shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR10(
        root=args.dataroot, train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=100, shuffle=False, num_workers=2)

    print('==> Building float model..')
    net = ResNet18().to(device)

    if args.resume:
        print('==> Resuming from checkpoint..')
        # Explicit check instead of `assert`: asserts are stripped under
        # `python -O`, and checking the file itself (not just the directory)
        # gives a more precise error.
        if not os.path.isfile(args.resume):
            raise FileNotFoundError(
                f'Error: checkpoint file not found: {args.resume}')
        checkpoint = torch.load(args.resume, weights_only=True)
        net.load_state_dict(checkpoint['net'])
        # best_acc = checkpoint['acc']
        # start_epoch = checkpoint['epoch']

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    # NOTE(review): T_max=200 exceeds the default 20 epochs, so only the first
    # part of the cosine schedule is traversed — confirm this is intended.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)

    print('==> Adding noise..')
    # Inject noise into Conv2d and Linear layers during training (forward hooks).
    register_hooks_for_layers(net, (nn.Conv2d, nn.Linear))

    for epoch in range(start_epoch, start_epoch + args.epoch):
        train(net, trainloader, device, optimizer, criterion, epoch)
        acc = test(net, testloader, device, criterion)
        if acc > best_acc:
            print(f'The best acc: {acc}')
            print('Saving..')
            state = {
                'net': net.state_dict(),
                'acc': acc,
                'epoch': epoch,
            }
            # makedirs(exist_ok=True) avoids the isdir/mkdir race.
            os.makedirs('checkpoint', exist_ok=True)
            torch.save(state, './checkpoint/ckpt_float_noise.pth')
            best_acc = acc

        scheduler.step()
    print('best acc is:',best_acc)

# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
