import torchvision
import torch.utils.data
import metrics
import numpy
import framework
import utils
import torch.distributed
import torch.multiprocessing
import torch.nn.parallel
from torch.distributed.elastic.multiprocessing.errors import record


@record
def train(rank, deep_learning_args):
    """Run one DDP worker training DeepLabV3+ for semantic segmentation.

    Args:
        rank: index of this worker; selects the GPU from deep_learning_args.gpus.
        deep_learning_args: argument namespace providing gpus, epochs,
            batch_size, lr, momentum, weight_decay, num_workers, wandb,
            and the dataset options consumed by utils.get_dataset.

    Side effects: logs per-epoch loss/mPA/mIoU to wandb or stdout;
    rank 0 saves the unwrapped model to ./train.pt.

    NOTE(review): assumes torch.distributed.init_process_group was already
    called by the launcher before this function runs — DDP construction and
    DistributedSampler both require it.
    """
    torch.cuda.set_device(deep_learning_args.gpus[rank])
    device = torch.device('cuda', deep_learning_args.gpus[rank])

    # ImageNet mean/std normalization (inputs are presumably already tensors
    # scaled to [0, 1] — ToTensor is expected to happen inside the dataset).
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], inplace=True)
    ])
    # Renamed from `train`/`val`: the originals shadowed this function's name.
    train_set, val_set = utils.get_dataset(deep_learning_args, transform)

    epochs = deep_learning_args.epochs
    batch_size = deep_learning_args.batch_size
    lr = deep_learning_args.lr
    momentum = deep_learning_args.momentum
    weight_decay = deep_learning_args.weight_decay
    num_workers = deep_learning_args.num_workers

    model = framework.DeepLabV3PLUS(output_stride=16, num_classes=train_set.num_classes)
    model.to(device)
    utils.init_weights(model.decoder)

    # Wrap in DDP *after* moving to the device and initializing weights,
    # so all ranks start from synchronized parameters.
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[deep_learning_args.gpus[rank]])
    optimizer = torch.optim.SGD(model.parameters(), lr=lr,
                                momentum=momentum, weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.9)

    # Under DDP each rank must see a disjoint shard of the data; a plain
    # shuffle=True loader would feed every rank the same stream. The sampler
    # does the (per-rank) shuffling, so the loader itself must not shuffle.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, shuffle=True)
    train_iter = torch.utils.data.DataLoader(train_set, batch_size=batch_size, sampler=train_sampler,
                                             drop_last=True, num_workers=num_workers)
    val_iter = torch.utils.data.DataLoader(val_set, batch_size=batch_size, shuffle=False, drop_last=True,
                                           num_workers=num_workers)
    # reduction='none' keeps per-pixel losses so we can average spatially below.
    loss = torch.nn.CrossEntropyLoss(reduction='none')
    accumulator = metrics.SegClassIouAccumulator(num_classes=train_set.num_classes)

    if deep_learning_args.wandb:
        import wandb

        wandb.init(project='deep_learning', entity='dzcmingdi')
        wandb.config = {
            'learning_rate': lr,
            'epochs': epochs,
            'batch_size': batch_size
        }

    model.train()

    for e in range(epochs):
        # Without set_epoch the sampler reuses the same permutation each epoch.
        train_sampler.set_epoch(e)
        loss_arr = []
        for x_cpu, y_cpu in train_iter:
            optimizer.zero_grad()
            x, y = x_cpu.to(device), y_cpu.to(device)
            yp = model(x)
            # Per-pixel CE -> mean over both spatial dims, summed over the batch.
            train_loss = loss(yp, y).mean(1).mean(1).sum()
            train_loss.backward()
            loss_arr.append(train_loss.detach().cpu())
            optimizer.step()
        # BUG FIX: the original never stepped the scheduler, so the LR
        # schedule it configured had no effect.
        scheduler.step()

        model.eval()
        with torch.no_grad():
            # torch.stack, not torch.tensor: loss_arr holds 0-dim tensors and
            # torch.tensor() on a list of tensors is deprecated. Guard the
            # empty-epoch case (tiny dataset + drop_last can yield no batches).
            train_mean_loss = torch.stack(loss_arr) if loss_arr else torch.zeros(1)
            for x, y in val_iter:
                x, y = x.to(device), y.to(device)
                pred = model(x)
                pred = pred.cpu()
                pred = torch.argmax(pred, dim=1)
                label = y.cpu()
                accumulator.add(pred.numpy().astype(numpy.uint32), label.numpy().astype(numpy.uint32))

            results = accumulator.json()
            if deep_learning_args.wandb:
                wandb.log(
                    {'train_loss': train_mean_loss.mean(), 'train_mPA': results['accuracy'],
                     'mean_Iou': results['iou']})
            else:
                print(
                    f'epoch:{e}, train_loss: {train_mean_loss.mean()}, train_mPA: {results["accuracy"]}, mean_Iou: {results["iou"]}')
            accumulator.reset()

        model.train()
    if rank == 0:
        # Save the unwrapped module: a pickled DDP wrapper cannot be loaded
        # without an initialized process group; the plain module can.
        torch.save(model.module, './train.pt')

# Example dataset roots (passed via deep_learning_args):
# voc2012: D:/DataFiles/Datasets/VOCdevkit/VOC2012
# ade20k:  D:/DataFiles/Datasets/ADEChallengeData2016
