import argparse
import copy
import os

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import yaml

from models import *
from utils import progress_bar
from witin_nn import WitinConv2d, WitinLinear, WitinBatchNorm2d, LayerConfigFactory, HandleNegInType

# Pin the process to the first GPU; must be set before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

def train(net, trainloader, device, optimizer, criterion, epoch):
    """Run one training epoch over *trainloader*, updating *net* in place.

    Prints a running loss / accuracy bar per batch.
    """
    print(f'\nEpoch: {epoch}')
    net.train()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0
    for step, (images, labels) in enumerate(trainloader):
        images = images.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()
        logits = net(images)
        batch_loss = criterion(logits, labels)
        batch_loss.backward()
        optimizer.step()

        running_loss += batch_loss.item()
        preds = logits.max(1)[1]
        n_seen += labels.size(0)
        n_correct += (preds == labels).sum().item()

        progress_bar(step, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                        (running_loss / (step + 1), 100. * n_correct / n_seen, n_correct, n_seen))

def calibrate(net, trainloader, device):
    """Drive forward passes over *trainloader* to calibrate the network.

    Presumably the witin layers record activation statistics/scales during
    these forward passes (TODO confirm against witin_nn).  No backward pass
    ever happens here, so inference runs under ``torch.no_grad()`` — the
    original version built the autograd graph on every batch for nothing,
    wasting memory.  The unused label transfer was dropped as well.
    """
    print(f'\nCalibration')
    net.eval()
    with torch.no_grad():
        for batch_idx, (inputs, _targets) in enumerate(trainloader):
            inputs = inputs.to(device)
            net(inputs)
            progress_bar(batch_idx, len(trainloader))

def test(net, testloader, device, criterion):
    """Evaluate *net* on *testloader*; print and return accuracy in percent."""
    net.eval()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0
    # No gradients needed for evaluation.
    with torch.no_grad():
        for step, (images, labels) in enumerate(testloader):
            images, labels = images.to(device), labels.to(device)
            logits = net(images)
            running_loss += criterion(logits, labels).item()

            preds = logits.max(1)[1]
            n_seen += labels.size(0)
            n_correct += (preds == labels).sum().item()

            progress_bar(step, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                            (running_loss / (step + 1), 100. * n_correct / n_seen, n_correct, n_seen))

    acc = 100. * n_correct / n_seen
    print(f'The acc: {acc}')
    return acc

def get_layer_by_name(model, layer_name):
    """Resolve a dotted path (e.g. ``"layer1.0.conv1"``) to a nested submodule.

    Purely numeric path components are treated as integer indices (to handle
    ``nn.Sequential`` and similar containers); every other component is looked
    up as an attribute.
    """
    node = model
    for part in layer_name.split('.'):
        node = node[int(part)] if part.isdigit() else getattr(node, part)
    return node
    
def _apply_layer_scales(net, scale_pth, layer_config, device):
    """Re-initialize quantized layers of *net* with per-layer scales from YAML.

    The scale file maps dotted layer paths (see ``get_layer_by_name``) to dicts
    with ``scale_x`` / ``scale_w`` / ``scale_y`` entries.  Mutating an existing
    layer's config has no effect (original note), so each matched layer is
    re-``__init__``-ed with a scaled copy of *layer_config* and its trained
    parameters are restored from a deep copy afterwards.
    """
    # The scale file contains plain scalars, so the safe loader is sufficient
    # (avoids FullLoader's ability to construct arbitrary Python objects).
    with open(scale_pth, "r") as file:
        scale_dict = yaml.safe_load(file)
    for layer_name, scale in scale_dict.items():
        layer = get_layer_by_name(net, layer_name)
        layer_ori = copy.deepcopy(layer)  # snapshot trained params before re-init
        config = copy.deepcopy(layer_config)
        config.scale_x = scale['scale_x']
        config.scale_weight = scale['scale_w']
        config.scale_y = scale['scale_y']
        if isinstance(layer, WitinConv2d):
            layer.__init__(in_channels=layer.in_channels, out_channels=layer.out_channels,
                           kernel_size=layer.kernel_size, stride=layer.stride,
                           padding=layer.padding, bias=False, layer_config=config)
            layer.weight = layer_ori.weight
            layer.bias = layer_ori.bias if layer_ori.bias is not None else layer.bias
            layer.to(device)
        elif isinstance(layer, WitinLinear):
            layer.__init__(in_features=layer.in_features, out_features=layer.out_features,
                           layer_config=config)
            layer.weight = layer_ori.weight
            layer.bias = layer_ori.bias if layer_ori.bias is not None else layer.bias
            layer.to(device)  # keep device placement consistent with the conv branch
        elif isinstance(layer, WitinBatchNorm2d):
            # BN quantization misbehaves for unknown reasons (original note);
            # keep it disabled for this layer type.
            config.use_quantization = False
            layer.__init__(num_features=layer.num_features, layer_config=config)
            layer.weight = layer_ori.weight
            layer.bias = layer_ori.bias if layer_ori.bias is not None else layer.bias
            layer.running_mean = layer_ori.running_mean
            layer.running_var = layer_ori.running_var
            layer.to(device)  # keep device placement consistent with the conv branch


def main():
    """Evaluate and fine-tune a Witin-quantized ResNet18 on CIFAR10.

    Builds the quantized model, resumes weights from a checkpoint,
    re-initializes quantized layers with per-layer scales from a YAML file,
    measures the baseline accuracy, then fine-tunes for ``--epoch`` epochs,
    checkpointing whenever the test accuracy improves.
    """
    parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
    parser.add_argument('--epoch', default=5, type=int, help='epoch')
    parser.add_argument('--dataroot', default='./dataset',
                        type=str, help='dataset root')
    parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
    parser.add_argument('--resume', default='./checkpoint/ckpt_ma_nat_noise.pth',
                        type=str, help='resume from checkpoint')
    parser.add_argument('--scale_pth', default='./scale/ma_scale_noise.yaml',
                        type=str, help='scale saving root')
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    best_acc = 0
    start_epoch = 0

    print('==> Preparing data..')
    # Standard CIFAR10 per-channel statistics.
    cifar_mean = (0.4914, 0.4822, 0.4465)
    cifar_std = (0.2023, 0.1994, 0.2010)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(cifar_mean, cifar_std),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(cifar_mean, cifar_std),
    ])

    trainset = torchvision.datasets.CIFAR10(
        root=args.dataroot, train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=128, shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR10(
        root=args.dataroot, train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=512, shuffle=False, num_workers=2)

    print('==> Building witin model..')
    layer_config = LayerConfigFactory.get_default_config()
    layer_config.use_quantization = False
    layer_config.use_auto_scale = False
    layer_config.handle_neg_in = HandleNegInType.PN
    layer_config.noise_level = 0
    net = ResNet18(witin=True, layer_config=layer_config).to(device)

    if args.resume:
        print('==> Resuming from checkpoint..')
        # Raise instead of assert: asserts are stripped under `python -O`.
        if not os.path.isdir('checkpoint'):
            raise FileNotFoundError('Error: no checkpoint directory found!')
        checkpoint = torch.load(args.resume, weights_only=True)
        # strict=False: the witin wrappers may carry extra/renamed entries
        # relative to the saved state dict — TODO confirm nothing vital is skipped.
        net.load_state_dict(checkpoint['net'], strict=False)
        start_epoch = 0

    _apply_layer_scales(net, args.scale_pth, layer_config, device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)

    # Baseline accuracy with the loaded scales.  (A leftover debug `exit()` here
    # previously made the fine-tuning loop below unreachable; it was removed.)
    acc = test(net, testloader, device, criterion)

    for epoch in range(start_epoch, start_epoch + args.epoch):
        train(net, trainloader, device, optimizer, criterion, epoch)
        acc = test(net, testloader, device, criterion)
        if acc > best_acc:
            print(f'The best acc: {acc}')
            print('Saving..')
            state = {
                'net': net.state_dict(),
                'acc': acc,
                'epoch': epoch,
            }
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            torch.save(state, './checkpoint/ckpt_ma_nat_noise.pth')
            best_acc = acc

        scheduler.step()
    print('best acc is:', best_acc)


if __name__ == '__main__':
    main()
