import argparse
import yaml
from loguru import logger
import torchvision
import torch.nn as nn
import torch.optim as optim
import torch
import os
from models.resnet import ResNet18
from tqdm import tqdm
# Best test accuracy (in %) observed so far across all epochs; read and
# updated by train_one_epoch() when deciding whether to save a checkpoint.
best_acc=0.0

def merge_nested_dict(d, other):
    """Recursively merge mapping *other* into mapping *d*.

    Values from *other* take precedence, except when both sides hold a
    dict under the same key, in which case the two dicts are merged
    recursively. Neither input is mutated.

    Args:
        d: Base mapping.
        other: Overriding mapping.

    Returns:
        A new dict containing the merged result.
    """
    merged = dict(d)
    for key, value in other.items():
        # Recurse only when BOTH sides are dicts. The original only checked
        # that d[key] was not None, so a non-dict base value paired with a
        # dict override crashed inside the recursive call (dict(x) TypeError).
        if isinstance(merged.get(key), dict) and isinstance(value, dict):
            merged[key] = merge_nested_dict(merged[key], value)
        else:
            merged[key] = value
    return merged


def get_config(default_file: str):
    """Load the training configuration from a YAML file.

    Parses the ``--config_file`` CLI flag, then loads *default_file* when
    it is provided, falling back to the CLI-supplied path otherwise.

    Args:
        default_file: Path to the default YAML config file, or None.

    Returns:
        The parsed configuration dict, or None if no path could be resolved.
    """
    p = argparse.ArgumentParser(description="Learned Step Size Quantization")
    p.add_argument(
        "--config_file", default="../config.yaml", help="path to a configuration file"
    )
    arg = p.parse_args()
    # Fall back to the CLI flag when no default is given; the original parsed
    # --config_file but never used it.
    path = default_file if default_file is not None else arg.config_file
    if path is None:
        # The original fell through to `return cfg` with `cfg` unbound here,
        # raising NameError; return None explicitly instead.
        logger.info("No default config file is provided")
        return None
    with open(path) as yaml_file:
        cfg = yaml.safe_load(yaml_file)
    return cfg


def get_model_cifar10(args: dict):
    """Build a ResNet-18 for CIFAR-10 and optionally resume from a checkpoint.

    Moves the model to GPU (wrapped in DataParallel) when CUDA is available,
    otherwise runs on CPU.

    Args:
        args: Config dict with args["model"]["num_classes"], and optionally
            args["resume"]["path"] pointing at a checkpoint file whose
            "model" entry is a state_dict.

    Returns:
        The (possibly DataParallel-wrapped) model, with checkpoint weights
        loaded when a valid resume path was given.
    """
    import torch.backends.cudnn as cudnn

    num_classes = args["model"]["num_classes"]
    model = ResNet18(num_classes)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)
    if device == "cuda":
        # NOTE: DataParallel prefixes state_dict keys with "module.", so
        # checkpoints saved here only reload cleanly into a wrapped model.
        model = torch.nn.DataParallel(model)
        cudnn.benchmark = True
    logger.debug("==> loading model success..")
    # .get avoids a KeyError when the config has no "resume" section at all;
    # the original indexed args["resume"] directly.
    resume = args.get("resume")
    logger.debug(resume)
    if resume is not None:
        # Load checkpoint.
        path = resume["path"]
        # os.path.isfile already implies existence; one check suffices.
        if not os.path.isfile(path):
            logger.debug("==> resume path is not exist..")
            return model
        logger.debug("==> Resuming from checkpoint..")
        # map_location lets a checkpoint saved on GPU load on a CPU-only host;
        # without it torch.load fails when CUDA is unavailable.
        checkpoint = torch.load(path, map_location=device)
        model.load_state_dict(checkpoint["model"])
    return model


# Training
def train_one_epoch(
    total_epoch, epoch: int, model, train_loader, test_loader, optimizer, criterion
):
    """Run one training epoch followed by a full test-set evaluation.

    Updates the module-level ``best_acc`` and writes a checkpoint under
    ./checkpoint/ whenever the test accuracy improves on the best seen so far.

    Args:
        total_epoch: Total number of epochs (progress-bar display only).
        epoch: Current epoch index.
        model: Network to train and evaluate.
        train_loader: DataLoader yielding training (inputs, targets) batches.
        test_loader: DataLoader yielding test (inputs, targets) batches.
        optimizer: Optimizer stepped once per training batch.
        criterion: Loss function applied to (outputs, targets).
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # ---- training pass ----
    model.train()
    train_loss = 0
    correct = 0
    total = 0
    pbar = tqdm(total=len(train_loader))
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        pbar.update(1)
        pbar.set_description(
            f"Epoch {epoch}/{total_epoch} Loss:{(train_loss)/(batch_idx+1):.3f}|Acc {(correct/total)*100:.2f}%"
        )
    pbar.close()

    # ---- evaluation pass ----
    global best_acc
    model.eval()
    test_loss = 0
    correct = 0
    total = 0
    pbar = tqdm(total=len(test_loader))
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_loader):
            pbar.update(1)
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            # BUG FIX: the original displayed train_loss here, so the eval
            # progress bar showed a stale training figure instead of test_loss.
            pbar.set_description(
                f"Epoch {epoch}/{total_epoch} Loss:{(test_loss)/(batch_idx+1):.3f}|Acc {(correct/total)*100.:.2f}%"
            )
    pbar.close()
    logger.debug(f"epoch {epoch}|Acc {(correct/total)*100.:.2f}%")

    # ---- checkpointing ----
    # Save checkpoint.
    acc = 100.0 * correct / total
    if acc > best_acc:
        print("Saving..")
        state = {
            "model": model.state_dict(),
            "acc": acc,
            "epoch": epoch,
        }
        # makedirs(exist_ok=True) replaces the racy isdir-then-mkdir pair.
        os.makedirs("checkpoint", exist_ok=True)
        torch.save(state, f"./checkpoint/epoch_{epoch}_model.pth")
        best_acc = acc


def train(args, model, train_loader, test_loader):
    """Train *model* for args["epoch"] epochs with SGD + cosine LR annealing.

    Args:
        args: Config dict providing args["model"]["lr"] (initial learning
            rate) and args["epoch"] (number of epochs).
        model: Network to train; its parameters are optimized in place.
        train_loader: DataLoader for the training set.
        test_loader: DataLoader for the test set (evaluated every epoch by
            train_one_epoch, which also checkpoints on improvement).
    """
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(
        model.parameters(), lr=args["model"]["lr"], momentum=0.9, weight_decay=5e-4
    )
    # Anneal the LR over the actual run length. The original hard-coded
    # T_max=200, which only matched runs of exactly 200 epochs; any other
    # epoch count got a truncated or over-stretched cosine schedule.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=args["epoch"]
    )
    for epoch in range(args["epoch"]):
        train_one_epoch(
            args["epoch"], epoch, model, train_loader, test_loader, optimizer, criterion
        )
        scheduler.step()
