
import torch
import datetime
import argparse
from args.parse_args import get_config
from images_utils.build import build_loader
from swin_transformer_pytorch.swin_transformer import swin_t
from model_utils.optimizer import build_optimizer
from model_utils.lr_scheduler import build_scheduler
from model_utils.logger import create_logger
from model_utils.utils import load_checkpoint, save_checkpoint, get_grad_norm, auto_resume_helper, reduce_tensor
import time
from model_utils.utils import accuracy,AverageMeter
import os
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import numpy as np

def parse_option():
    """Build the experiment config from command-line options.

    Recognizes ``--c`` (path to a config file) and ``--local_rank``;
    any other argument is ignored via ``parse_known_args``.
    """
    parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
    parser.add_argument("--c", help="add config ", default=None)
    parser.add_argument("--local_rank", type=int, required=False, help='local rank for DistributedDataParallel')

    args, _unparsed = parser.parse_known_args()

    # Resolve the raw config-file path into the full config object.
    return get_config(args.c)

def main(config):
    """Run the full training loop: build data/model/optimizer, train, validate.

    Args:
        config: frozen experiment config (yacs-style node with OUTPUT,
            MODEL_NAME, cuda, LOCAL_RANK, START_EPOCH, EPOCHS, SAVE_FREQ, ...).
    """
    logger = create_logger(output_dir=config.OUTPUT, dist_rank=1, name=f"{config.MODEL_NAME}")
    dataset_train, dataset_val, data_loader_train, data_loader_val = build_loader(config)
    model = swin_t()
    optimizer = build_optimizer(config, model)
    if config.cuda == 1:
        # DDP with device_ids requires the parameters to already live on the GPU.
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False)
        model_without_ddp = model.module
    else:
        # An unwrapped model has no .module attribute — use the model itself.
        model_without_ddp = model
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logger.info(f"number of trainable params: {n_parameters}")
    lr_scheduler = build_scheduler(config, optimizer, len(data_loader_train))
    criterion = torch.nn.CrossEntropyLoss()
    max_accuracy = 0.0

    logger.info("Start training")

    for epoch in range(config.START_EPOCH, config.EPOCHS):
        if config.cuda == 1:
            # Only a DistributedSampler exposes set_epoch (reshuffles per epoch).
            data_loader_train.sampler.set_epoch(epoch)
        # Pass logger explicitly: train_one_epoch(config, model, criterion,
        # data_loader, optimizer, epoch, logger, lr_scheduler).
        train_one_epoch(config, model, criterion, data_loader_train, optimizer, epoch, logger, lr_scheduler)
        if (epoch % config.SAVE_FREQ == 0 or epoch == (config.EPOCHS - 1)):
            save_checkpoint(config, epoch, model_without_ddp, max_accuracy, optimizer, lr_scheduler, logger)

        acc1, acc5, loss = validate(config, data_loader_val, model)
        logger.info(f"Accuracy of the network on the {len(dataset_val)} test images: {acc1:.1f}%")
        max_accuracy = max(max_accuracy, acc1)
        logger.info(f'Max accuracy: {max_accuracy:.2f}%')
@torch.no_grad()
def validate(config, data_loader, model):
    """Evaluate *model* on *data_loader*.

    Returns:
        (avg_acc1, avg_acc5, avg_loss) averaged over the validation set.
    """
    logger = create_logger(output_dir=config.OUTPUT, dist_rank=1, name=f"{config.MODEL_NAME}")
    criterion = torch.nn.CrossEntropyLoss()
    model.eval()

    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    acc1_meter = AverageMeter()
    acc5_meter = AverageMeter()

    end = time.time()
    for idx, (images, target) in enumerate(data_loader):
        if config.cuda == 1:
            images = images.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)

        # compute output
        output = model(images)

        # measure accuracy and record loss
        loss = criterion(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))

        if config.cuda == 1:
            # All-reduce across ranks only applies in distributed mode;
            # without an initialized process group reduce_tensor would fail.
            acc1 = reduce_tensor(acc1)
            acc5 = reduce_tensor(acc5)
            loss = reduce_tensor(loss)

        loss_meter.update(loss.item(), target.size(0))
        acc1_meter.update(acc1.item(), target.size(0))
        acc5_meter.update(acc5.item(), target.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if idx % config.PRINT_FREQ == 0:
            memory_used = 1  # placeholder when no GPU stats are available
            if config.cuda == 1:
                memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
            logger.info(
                f'Test: [{idx}/{len(data_loader)}]\t'
                f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
                f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
                f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})\t'
                f'Mem {memory_used:.0f}MB')
    logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
    return acc1_meter.avg, acc5_meter.avg, loss_meter.avg


def train_one_epoch(config, model, criterion, data_loader, optimizer, epoch, logger, lr_scheduler):
    """Train *model* for one epoch over *data_loader*.

    Supports gradient accumulation (config.ACCUMULATION_STEPS > 1) and
    optional gradient clipping (config.CLIP_GRAD).
    """
    model.train()
    optimizer.zero_grad()

    num_steps = len(data_loader)
    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    norm_meter = AverageMeter()

    start = time.time()
    end = time.time()
    for idx, (samples, targets) in enumerate(data_loader):
        if config.cuda == 1:
            samples = samples.cuda(non_blocking=True)
            targets = targets.cuda(non_blocking=True)

        outputs = model(samples)

        # NOTE(review): normalized to the flat config.ACCUMULATION_STEPS used
        # everywhere else in this file (the original mixed
        # config.TRAIN.ACCUMULATION_STEPS and config.ACCUMULATION_STEPS).
        if config.ACCUMULATION_STEPS > 1:
            loss = criterion(outputs, targets)
            # Scale the loss so accumulated gradients average over the steps.
            loss = loss / config.ACCUMULATION_STEPS
            loss.backward()
            if config.CLIP_GRAD:
                grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.CLIP_GRAD)
            else:
                grad_norm = get_grad_norm(model.parameters())
            # Step only at accumulation boundaries.
            if (idx + 1) % config.ACCUMULATION_STEPS == 0:
                optimizer.step()
                optimizer.zero_grad()
                lr_scheduler.step_update(epoch * num_steps + idx)
        else:
            loss = criterion(outputs, targets)
            optimizer.zero_grad()
            loss.backward()
            if config.CLIP_GRAD:
                grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.CLIP_GRAD)
            else:
                grad_norm = get_grad_norm(model.parameters())
            optimizer.step()
            lr_scheduler.step_update(epoch * num_steps + idx)
        if config.cuda:
            # Ensure timing below measures actual GPU work, not just enqueueing.
            torch.cuda.synchronize()

        loss_meter.update(loss.item(), targets.size(0))
        norm_meter.update(grad_norm)
        batch_time.update(time.time() - end)
        end = time.time()

        if idx % config.PRINT_FREQ == 0:
            lr = optimizer.param_groups[0]['lr']
            memory_used = 1  # placeholder when no GPU stats are available
            if config.cuda == 1:
                memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)

            etas = batch_time.avg * (num_steps - idx)
            logger.info(
                # config.EPOCHS matches the epoch loop in main() (was TRAIN.EPOCHS).
                f'Train: [{epoch}/{config.EPOCHS}][{idx}/{num_steps}]\t'
                f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f}\t'
                f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
                f'loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
                f'grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f})\t'
                f'mem {memory_used:.0f}MB')
    epoch_time = time.time() - start
    logger.info(f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}")


if __name__ == '__main__':
    config = parse_option()

    # Pick up rank/world size from the launcher's environment (torchrun etc.).
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        rank = int(os.environ["RANK"])
        world_size = int(os.environ['WORLD_SIZE'])
        print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")
    else:
        rank = -1
        world_size = -1
    if config.cuda == 1:
        # set_device requires an explicit device index; bind this process
        # to its local GPU before initializing NCCL.
        torch.cuda.set_device(config.LOCAL_RANK)
        torch.distributed.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank)
        torch.distributed.barrier()

    # Seed every RNG source for reproducibility.
    seed = config.SEED
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    # Number of participating processes (1 in non-distributed mode).
    # Renamed from `get_world_size`, which shadowed dist.get_world_size.
    num_processes = 1
    if config.cuda == 1:
        num_processes = dist.get_world_size()
    # linear scale the learning rate according to total batch size, may not be optimal
    linear_scaled_lr = config.BASE_LR * config.BATCH_SIZE * num_processes / 512.0
    linear_scaled_warmup_lr = config.WARMUP_LR * config.BATCH_SIZE * num_processes / 512.0
    linear_scaled_min_lr = config.MIN_LR * config.BATCH_SIZE * num_processes / 512.0
    # gradient accumulation also need to scale the learning rate
    if config.ACCUMULATION_STEPS > 1:
        linear_scaled_lr = linear_scaled_lr * config.ACCUMULATION_STEPS
        linear_scaled_warmup_lr = linear_scaled_warmup_lr * config.ACCUMULATION_STEPS
        linear_scaled_min_lr = linear_scaled_min_lr * config.ACCUMULATION_STEPS
    config.defrost()
    config.BASE_LR = linear_scaled_lr
    config.WARMUP_LR = linear_scaled_warmup_lr
    config.MIN_LR = linear_scaled_min_lr
    config.freeze()

    os.makedirs(config.OUTPUT, exist_ok=True)
    # dist.get_rank() is only valid after init_process_group; default to 0
    # in non-distributed runs.
    dist_rank = dist.get_rank() if config.cuda == 1 else 0
    logger = create_logger(output_dir=config.OUTPUT, dist_rank=dist_rank, name=f"{config.MODEL_NAME}")
    # Dump the resolved config exactly once (rank 0 in distributed mode);
    # the original wrote the same file a second time unconditionally.
    if dist_rank == 0:
        path = os.path.join(config.OUTPUT, "config.json")
        with open(path, "w") as f:
            f.write(config.dump())
        logger.info(f"Full config saved to {path}")

    # print config
    logger.info(config.dump())

    main(config)

