import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from models import *
from utils import progress_bar

from witin_nn import LayerConfigFactory, HandleNegInType

from insert_observer_nodes import insert_observer_nodes
import yaml

# Pin this process to the first GPU only.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

def train(net, trainloader, device, optimizer, criterion, epoch):
    """Run one training epoch of *net* over *trainloader*.

    Updates the model in place via *optimizer* and reports the running
    average loss and accuracy through progress_bar.
    """
    print(f'\nEpoch: {epoch}')
    net.train()
    running_loss = 0
    n_correct = 0
    n_seen = 0
    for step, (inputs, targets) in enumerate(trainloader):
        inputs = inputs.to(device)
        targets = targets.to(device)

        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        preds = outputs.max(1)[1]
        n_seen += targets.size(0)
        n_correct += preds.eq(targets).sum().item()

        progress_bar(step, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                        (running_loss / (step + 1), 100. * n_correct / n_seen, n_correct, n_seen))

def calibrate(net, trainloader, device):
    """Run forward passes over *trainloader* so that observers inserted into
    *net* can record activation statistics.

    No parameters are updated and no loss is computed; the only purpose is
    to feed data through the model. Fix: the loop is now wrapped in
    ``torch.no_grad()`` — the original version built an autograd graph for
    every forward pass, needlessly holding activations in memory during
    calibration.
    """
    print(f'\nCalibration')
    net.eval()
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            progress_bar(batch_idx, len(trainloader))

def test(net, testloader, device, criterion):
    """Evaluate *net* on *testloader* and return the accuracy in percent.

    Runs under torch.no_grad(); prints running loss/accuracy per batch and
    the final accuracy.
    """
    net.eval()
    loss_sum = 0
    hits = 0
    seen = 0
    with torch.no_grad():
        for step, (inputs, targets) in enumerate(testloader):
            inputs = inputs.to(device)
            targets = targets.to(device)
            outputs = net(inputs)
            loss_sum += criterion(outputs, targets).item()

            preds = outputs.max(1)[1]
            seen += targets.size(0)
            hits += preds.eq(targets).sum().item()

            progress_bar(step, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' %
                            (loss_sum / (step + 1), 100. * hits / seen, hits, seen))

    acc = 100. * hits / seen
    print(f'The acc: {acc}')
    return acc

def get_layer_by_name(model, layer_name):
    """Resolve a dot-separated path (e.g. ``'layer1.0.conv1'``) to the
    corresponding nested submodule of *model*.

    Purely-numeric path components are treated as integer indices (for
    containers such as ``nn.Sequential``); everything else is looked up
    as an attribute.
    """
    current = model
    for part in layer_name.split('.'):
        current = current[int(part)] if part.isdigit() else getattr(current, part)
    return current
    
def main():
    """Calibrate a pretrained float ResNet18 on CIFAR-10 with quantization
    observers and export per-node power-of-two scales to a YAML file.

    Pipeline: parse args -> build CIFAR-10 loaders -> load checkpoint ->
    insert input/output observers -> one calibration pass over the train
    set -> compute scale_x / scale_w / scale_y for each Conv2d / Linear /
    BatchNorm2d node -> dump the scale dict to args.scale_pth.
    """
    parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
    parser.add_argument('--epoch', default=20, type=int, help='epoch')
    parser.add_argument('--dataroot', default='./dataset', 
                        type=str, help='dataset root')
    parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
    parser.add_argument('--resume', default='./checkpoint/ckpt_float_95.15.pth',
                        type=str, help='resume from checkpoint')
    parser.add_argument('--scale_pth', default='./scale/ma_scale_noise.yaml', 
                        type=str, help='scale saving root')
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    best_acc = 0
    start_epoch = 0

    print('==> Preparing data..')
    # Standard CIFAR-10 augmentation for training; the normalization
    # constants are the usual per-channel CIFAR-10 mean/std.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(
        root=args.dataroot, train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=512, shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR10(
        root=args.dataroot, train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=200, shuffle=False, num_workers=2)

    print('==> Building float model..')
    net = ResNet18().to(device)

    if args.resume:
        print('==> Resuming from checkpoint..')
        assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.resume, weights_only=True)
        net.load_state_dict(checkpoint['net'])
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']

    # NOTE(review): criterion/optimizer/scheduler (and best_acc/start_epoch)
    # are set up but never used below -- no training loop runs in this
    # script. Presumably kept for reuse with train()/test(); confirm before
    # removing.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)

    print('==> Inserting observer..')
    # Insert observers on the input/output nodes of each layer.
    # Alternative observer choices, kept for reference:
    # HistogramObserver
    # observer = torch.quantization.observer.HistogramObserver(
    #         dtype=torch.qint8, qscheme=torch.per_tensor_affine,
    #         quant_min=-128, quant_max=127)
    # MinMaxObserver
    # observer = torch.quantization.observer.MinMaxObserver(
    #         dtype=torch.qint8, qscheme=torch.per_tensor_affine,
    #         quant_min=-128, quant_max=127)
    # MovingAverageMinMaxObserver
    observer = torch.quantization.observer.MovingAverageMinMaxObserver(
            dtype=torch.qint8, qscheme=torch.per_tensor_affine,
            quant_min=-128, quant_max=127)
    prepared_model = insert_observer_nodes(net.eval(), observer)
    prepared_model.graph.print_tabular()
    prepared_model.to(device)

    print('==> Observing model..')
    # One pass over the training set lets the observers accumulate
    # activation min/max statistics (no gradients/updates needed).
    calibrate(prepared_model, trainloader, device)
    # torch.save(prepared_model.state_dict(), 'calbration_model.pt')
    # prepared_model.load_state_dict(torch.load('calbration_model.pt'))

    # Collect input/weight/output scales for every Conv2d / Linear /
    # BatchNorm2d call_module node in the traced graph.
    modules = dict(prepared_model.named_modules())
    scale_dict = {}
    for node in prepared_model.graph.nodes:
        if node.op == 'call_module':
            module_type = type(modules.get(node.target))
            if module_type == nn.Conv2d or module_type == nn.Linear or module_type == nn.BatchNorm2d:
                module = modules.get(node.target)
                # Each scale is 128 / max|value|, snapped to the nearest
                # power of two (rounding in log2 space) -- presumably for
                # hardware-friendly shift-based quantization.
                scale_w = 128/torch.max(torch.abs(module.weight.data))
                scale_w = 2**(torch.log2(scale_w)).round()
                # print(scale_w)
                # Observer modules follow the '<node>_input_observer' /
                # '<node>_output_observer' naming used by insert_observer_nodes.
                input_observer = modules[node.name+'_input_observer']
                scale_x = 128/torch.max(input_observer.min_val.abs(), input_observer.max_val.abs())
                scale_x = 2**(torch.log2(scale_x)).round()
                # print(scale_x)
                output_observer = modules[node.name+'_output_observer']
                scale_y = 128/torch.max(output_observer.min_val.abs(), output_observer.max_val.abs())
                scale_y = 2**(torch.log2(scale_y)).round()
                # print(scale_y)

                if node.target not in scale_dict:
                    scale_dict[node.target] = {}
                # Cast tensor scales to plain ints so the YAML dump is clean.
                scale_dict[node.target]['scale_x']=int(scale_x)
                scale_dict[node.target]['scale_w']=int(scale_w)
                scale_dict[node.target]['scale_y']=int(scale_y)

    with open(args.scale_pth, "w") as file:
        yaml.dump(scale_dict, file, default_flow_style=False, allow_unicode=True)

# Run the calibration/scale-export pipeline only when executed as a script.
if __name__ == '__main__':
    main()
